Dataset schema: column name, type, and observed range (string columns show length ranges; ⌀ marks a nullable column).

| Column | Type | Range |
|---|---|---|
| id | int64 | 0 – 328k |
| repository_name | string | length 7 – 58 |
| file_path | string | length 9 – 302 |
| class_name | string | length 5 – 256 |
| human_written_code | string | length 16 – 2.16M |
| class_skeleton | string (⌀) | length 18 – 1.49M |
| total_program_units | int64 | 1 – 1.76k |
| total_doc_str | int64 | 0 – 771 |
| AvgCountLine | float64 | 0 – 7.89k |
| AvgCountLineBlank | float64 | 0 – 297 |
| AvgCountLineCode | float64 | 0 – 7.89k |
| AvgCountLineComment | float64 | 0 – 7.89k |
| AvgCyclomatic | float64 | 0 – 130 |
| CommentToCodeRatio | float64 | 0 – 168 |
| CountClassBase | float64 | 0 – 40 |
| CountClassCoupled | float64 | 0 – 583 |
| CountClassCoupledModified | float64 | 0 – 575 |
| CountClassDerived | float64 | 0 – 5.35k |
| CountDeclInstanceMethod | float64 | 0 – 529 |
| CountDeclInstanceVariable | float64 | 0 – 296 |
| CountDeclMethod | float64 | 0 – 599 |
| CountDeclMethodAll | float64 | 0 – 1.12k |
| CountLine | float64 | 1 – 40.4k |
| CountLineBlank | float64 | 0 – 8.16k |
| CountLineCode | float64 | 1 – 25.7k |
| CountLineCodeDecl | float64 | 1 – 8.15k |
| CountLineCodeExe | float64 | 0 – 24.2k |
| CountLineComment | float64 | 0 – 16.5k |
| CountStmt | float64 | 1 – 9.71k |
| CountStmtDecl | float64 | 1 – 8.15k |
| CountStmtExe | float64 | 0 – 9.69k |
| MaxCyclomatic | float64 | 0 – 759 |
| MaxInheritanceTree | float64 | 0 – 16 |
| MaxNesting | float64 | 0 – 34 |
| SumCyclomatic | float64 | 0 – 2.9k |

The records below list these fields in the same order, one field per `|`-delimited block.
5,100
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
from ...utils import ModelOutput, auto_docstring, logging
from torch import Tensor, nn
@auto_docstring
class SamPreTrainedModel(PreTrainedModel):
config: SamConfig
base_model_prefix = 'sam'
main_input_name = 'pixel_values'
_no_split_modules = ['SamVisionAttention']
supports_gradient_checkpointing = True
_supports_sdpa = True
def _init_weights(self, module: nn.Module):
super()._init_weights(module)
if isinstance(module, SamVisionAttention):
if module.use_rel_pos:
module.rel_pos_h.data.zero_()
module.rel_pos_w.data.zero_()
elif isinstance(module, SamVisionEncoder):
if self.config.use_abs_pos:
module.pos_embed.data.zero_()
|
@auto_docstring
class SamPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 18
| 1
| 17
| 9
| 15
| 0
| 16
| 9
| 14
| 5
| 1
| 2
| 5
|
5,101
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamPromptEncoder
|
from torch import Tensor, nn
import torch
from typing import Callable, Optional, Union
from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
import torch.nn.functional as F
class SamPromptEncoder(nn.Module):
def __init__(self, config: SamConfig):
super().__init__()
self.shared_embedding = SamPositionalEmbedding(config.vision_config)
config = config.prompt_encoder_config
self.mask_embed = SamMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
self.input_image_size = config.image_size
self.point_embed = nn.ModuleList([nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)])
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5
if pad:
target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1])
target_labels_shape = (points.shape[0], points.shape[1], 1)
padding_point = torch.zeros(target_point_shape, device=points.device)
padding_label = -torch.ones(target_labels_shape, device=labels.device)
points = torch.cat([points, padding_point], dim=2)
labels = torch.cat([labels, padding_label], dim=2)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
point_embedding = torch.where(labels[..., None] != -10, point_embedding, torch.zeros_like(point_embedding))
point_embedding = torch.where((labels == 0)[:, :, :, None], point_embedding + self.point_embed[0].weight[None, None, :, :], point_embedding)
point_embedding = torch.where((labels == 1)[:, :, :, None], point_embedding + self.point_embed[1].weight[None, None, :, :], point_embedding)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5
batch_size, nb_boxes = boxes.shape[:2]
coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
input_shape = (self.input_image_size, self.input_image_size)
corner_embedding = self.shared_embedding(coords, input_shape)
corner_embedding[:, :, 0, :] += self.point_embed[2].weight
corner_embedding[:, :, 1, :] += self.point_embed[3].weight
return corner_embedding
def forward(self, input_points: Optional[tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError('If points are provided, labels must also be provided.')
point_embeddings = self._embed_points(input_points, input_labels, pad=input_boxes is None)
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1])
return (sparse_embeddings, dense_embeddings)
|
class SamPromptEncoder(nn.Module):
def __init__(self, config: SamConfig):
pass
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
'''Embeds point prompts.'''
pass
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
'''Embeds box prompts.'''
pass
def forward(self, input_points: Optional[tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
'''
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
'''
pass
| 5
| 3
| 27
| 3
| 20
| 4
| 3
| 0.21
| 1
| 7
| 2
| 0
| 4
| 8
| 4
| 14
| 110
| 13
| 82
| 37
| 71
| 17
| 58
| 30
| 53
| 7
| 1
| 2
| 11
|
5,102
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamTwoWayAttentionBlock
|
from transformers.utils.generic import OutputRecorder, TransformersKwargs, check_model_inputs
from ...processing_utils import Unpack
from torch import Tensor, nn
class SamTwoWayAttentionBlock(nn.Module):
def __init__(self, config, attention_downsample_rate: int=2, skip_first_layer_pe: bool=False):
"""
A transformer block with four layers:
(1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
sparse inputs (4) cross attention of dense inputs -> sparse inputs
Arguments:
config (`SamMaskDecoderConfig`):
The configuration file used to instantiate the block
            attention_downsample_rate (*optional*, int, defaults to 2):
The downsample ratio of the block used to reduce the inner dim of the attention.
skip_first_layer_pe (*optional*, bool, defaults to `False`):
Whether or not to skip the addition of the query_point_embedding on the first layer.
"""
super().__init__()
self.hidden_size = config.hidden_size
self.layer_norm_eps = config.layer_norm_eps
self.self_attn = SamAttention(config, downsample_rate=1)
self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.cross_attn_token_to_image = SamAttention(config, downsample_rate=attention_downsample_rate)
self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.mlp = SamMLPBlock(config)
self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
self.cross_attn_image_to_token = SamAttention(config, downsample_rate=attention_downsample_rate)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(self, queries: Tensor, keys: Tensor, query_point_embedding: Tensor, key_point_embedding: Tensor, attention_similarity: Tensor, **kwargs: Unpack[TransformersKwargs]):
if self.skip_first_layer_pe:
queries, _ = self.self_attn(query=queries, key=queries, value=queries)
else:
query = queries + query_point_embedding
attn_out, _ = self.self_attn(query=query, key=query, value=queries)
queries = queries + attn_out
queries = self.layer_norm1(queries)
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_token_to_image(query=query, key=key, value=keys, attention_similarity=attention_similarity)
queries = queries + attn_out
queries = self.layer_norm2(queries)
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.layer_norm3(queries)
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries)
keys = keys + attn_out
keys = self.layer_norm4(keys)
return (queries, keys, attn_out)
|
class SamTwoWayAttentionBlock(nn.Module):
def __init__(self, config, attention_downsample_rate: int=2, skip_first_layer_pe: bool=False):
'''
A transformer block with four layers:
(1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
sparse inputs (4) cross attention of dense inputs -> sparse inputs
Arguments:
config (`SamMaskDecoderConfig`):
The configuration file used to instantiate the block
            attention_downsample_rate (*optional*, int, defaults to 2):
The downsample ratio of the block used to reduce the inner dim of the attention.
skip_first_layer_pe (*optional*, bool, defaults to `False`):
Whether or not to skip the addition of the query_point_embedding on the first layer.
'''
pass
def forward(self, queries: Tensor, keys: Tensor, query_point_embedding: Tensor, key_point_embedding: Tensor, attention_similarity: Tensor, **kwargs: Unpack[TransformersKwargs]):
pass
| 3
| 1
| 43
| 8
| 27
| 8
| 2
| 0.29
| 1
| 5
| 1
| 0
| 2
| 11
| 2
| 12
| 88
| 17
| 55
| 27
| 44
| 16
| 39
| 19
| 36
| 3
| 1
| 1
| 4
|
5,103
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamTwoWayTransformer
|
from ...processing_utils import Unpack
from torch import Tensor, nn
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput
from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
from transformers.utils.generic import OutputRecorder, TransformersKwargs, check_model_inputs
class SamTwoWayTransformer(nn.Module):
def __init__(self, config: SamMaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(SamTwoWayAttentionBlock(config, skip_first_layer_pe=i == 0))
self.final_attn_token_to_image = SamAttention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(self, point_embeddings: Tensor, image_embeddings: Tensor, image_positional_embeddings: Tensor, attention_similarity: Tensor, target_embedding=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError('You have to specify an image_embedding')
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
queries = point_embeddings
keys = image_embeddings
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, attention_similarity=attention_similarity, **kwargs)
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return (queries, keys)
|
class SamTwoWayTransformer(nn.Module):
def __init__(self, config: SamMaskDecoderConfig):
pass
def forward(self, point_embeddings: Tensor, image_embeddings: Tensor, image_positional_embeddings: Tensor, attention_similarity: Tensor, target_embedding=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
pass
| 3
| 0
| 34
| 7
| 26
| 2
| 5
| 0.06
| 1
| 8
| 3
| 0
| 2
| 5
| 2
| 12
| 69
| 14
| 52
| 27
| 39
| 3
| 33
| 17
| 30
| 8
| 1
| 2
| 10
|
5,104
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamVisionAttention
|
import torch.nn.functional as F
from torch import Tensor, nn
import torch
class SamVisionAttention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(self, config, window_size):
super().__init__()
input_size = (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size)
self.num_attention_heads = config.num_attention_heads
head_dim = config.hidden_size // config.num_attention_heads
self.scale = head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
self.use_rel_pos = config.use_rel_pos
if self.use_rel_pos:
if input_size is None:
raise ValueError('Input size must be provided if using relative positional encoding.')
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`torch.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def get_decomposed_rel_pos(self, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int]) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
query (`torch.Tensor`):
query q in the attention layer with shape (batch_size, query_height * query_width, channel).
rel_pos_h (`torch.Tensor`):
relative position embeddings (Lh, channel) for height axis.
rel_pos_w (`torch.Tensor`):
relative position embeddings (Lw, channel) for width axis.
q_size (tuple):
spatial sequence size of query q with (query_height, query_width).
k_size (tuple):
spatial sequence size of key k with (key_height, key_width).
Returns:
decomposed_rel_pos (`torch.Tensor`):
decomposed relative position embeddings.
"""
query_height, query_width = q_size
key_height, key_width = k_size
relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)
batch_size, _, dim = query.shape
reshaped_query = query.reshape(batch_size, query_height, query_width, dim)
rel_h = torch.einsum('bhwc,hkc->bhwk', reshaped_query, relative_position_height)
rel_w = torch.einsum('bhwc,wkc->bhwk', reshaped_query, relative_position_width)
decomposed_rel_pos = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
return decomposed_rel_pos
def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, height, width, _ = hidden_states.shape
qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1).permute(2, 0, 3, 1, 4)
query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)
attn_weights = query * self.scale @ key.transpose(-2, -1)
if self.use_rel_pos:
decomposed_rel_pos = self.get_decomposed_rel_pos(query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width))
decomposed_rel_pos = decomposed_rel_pos.reshape_as(attn_weights)
attn_weights = attn_weights + decomposed_rel_pos
attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype)
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1)
attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return (attn_output, attn_weights)
|
class SamVisionAttention(nn.Module):
'''Multi-head Attention block with relative position embeddings.'''
def __init__(self, config, window_size):
pass
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
'''
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`torch.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
'''
pass
def get_decomposed_rel_pos(self, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int]) -> torch.Tensor:
'''
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
query (`torch.Tensor`):
query q in the attention layer with shape (batch_size, query_height * query_width, channel).
rel_pos_h (`torch.Tensor`):
relative position embeddings (Lh, channel) for height axis.
rel_pos_w (`torch.Tensor`):
relative position embeddings (Lw, channel) for width axis.
q_size (tuple):
spatial sequence size of query q with (query_height, query_width).
k_size (tuple):
spatial sequence size of key k with (key_height, key_width).
Returns:
decomposed_rel_pos (`torch.Tensor`):
decomposed relative position embeddings.
'''
pass
def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 5
| 3
| 33
| 5
| 19
| 10
| 2
| 0.51
| 1
| 4
| 0
| 1
| 4
| 8
| 4
| 14
| 138
| 23
| 76
| 43
| 63
| 39
| 53
| 35
| 48
| 4
| 1
| 2
| 9
|
5,105
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamVisionEncoder
|
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from transformers.utils.generic import OutputRecorder, TransformersKwargs, check_model_inputs
from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
class SamVisionEncoder(SamPreTrainedModel):
_can_record_outputs = {'hidden_states': SamVisionLayer, 'attentions': SamVisionAttention}
def __init__(self, config: SamVisionConfig):
super().__init__(config)
self.config = config
self.image_size = config.image_size
self.patch_embed = SamPatchEmbeddings(config)
self.pos_embed = None
if config.use_abs_pos:
self.pos_embed = nn.Parameter(torch.zeros(1, config.image_size // config.patch_size, config.image_size // config.patch_size, config.hidden_size))
self.layers = nn.ModuleList()
for i in range(config.num_hidden_layers):
layer = SamVisionLayer(config, window_size=config.window_size if i not in config.global_attn_indexes else 0)
self.layers.append(layer)
self.neck = SamVisionNeck(config)
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.patch_embed
@check_model_inputs
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> SamVisionEncoderOutput:
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.patch_embed(pixel_values)
if self.pos_embed is not None:
hidden_states = hidden_states + self.pos_embed
for layer_module in self.layers:
hidden_states = layer_module(hidden_states)
hidden_states = self.neck(hidden_states)
return SamVisionEncoderOutput(last_hidden_state=hidden_states)
|
class SamVisionEncoder(SamPreTrainedModel):
def __init__(self, config: SamVisionConfig):
pass
def get_input_embeddings(self):
pass
@check_model_inputs
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> SamVisionEncoderOutput:
pass
| 5
| 0
| 30
| 5
| 24
| 0
| 7
| 0.01
| 1
| 10
| 5
| 0
| 3
| 7
| 3
| 13
| 93
| 18
| 74
| 25
| 64
| 1
| 48
| 19
| 44
| 16
| 1
| 2
| 21
|
5,106
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamVisionLayer
|
import torch.nn.functional as F
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
class SamVisionLayer(GradientCheckpointingLayer):
def __init__(self, config, window_size):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn = SAM_VISION_ATTENTION_CLASSES[config._attn_implementation](config, window_size)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = SamMLPBlock(config)
self.window_size = window_size
def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> tuple[torch.Tensor, tuple[int, int]]:
"""
Args:
Partition into non-overlapping windows with padding if needed.
hidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window
size.
Returns:
windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
(pad_height, pad_width): padded height and width before partition
"""
batch_size, height, width, channel = hidden_states.shape
pad_h = (window_size - height % window_size) % window_size
pad_w = (window_size - width % window_size) % window_size
hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
pad_height, pad_width = (height + pad_h, width + pad_w)
hidden_states = hidden_states.reshape(batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel)
windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
return (windows, (pad_height, pad_width))
def window_unpartition(self, windows: torch.Tensor, window_size: int, padding_shape: tuple[int, int], original_shape: tuple[int, int]) -> torch.Tensor:
"""
Args:
Window unpartition into original sequences and removing padding.
hidden_states (tensor):
input tokens with [batch_size * num_windows, window_size, window_size, channel].
window_size (int):
window size.
padding_shape (Tuple):
padded height and width (pad_height, pad_width).
original_shape (Tuple): original height and width (height, width) before padding.
Returns:
hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
"""
pad_height, pad_width = padding_shape
height, width = original_shape
batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
hidden_states = windows.reshape(batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1)
hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
hidden_states = hidden_states[:, :height, :width, :].contiguous()
return hidden_states
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
if self.window_size > 0:
height, width = (hidden_states.shape[1], hidden_states.shape[2])
hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)
hidden_states, attn_weights = self.attn(hidden_states=hidden_states)
if self.window_size > 0:
hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))
hidden_states = residual + hidden_states
layernorm_output = self.layer_norm2(hidden_states)
hidden_states = hidden_states + self.mlp(layernorm_output)
return hidden_states
|
class SamVisionLayer(GradientCheckpointingLayer):
def __init__(self, config, window_size):
pass
def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> tuple[torch.Tensor, tuple[int, int]]:
'''
Args:
Partition into non-overlapping windows with padding if needed.
hidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window
size.
Returns:
windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
(pad_height, pad_width): padded height and width before partition
'''
pass
def window_unpartition(self, windows: torch.Tensor, window_size: int, padding_shape: tuple[int, int], original_shape: tuple[int, int]) -> torch.Tensor:
'''
Args:
Window unpartition into original sequences and removing padding.
hidden_states (tensor):
input tokens with [batch_size * num_windows, window_size, window_size, channel].
window_size (int):
window size.
padding_shape (Tuple):
padded height and width (pad_height, pad_width).
original_shape (Tuple): original height and width (height, width) before padding.
Returns:
hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
'''
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]:
pass
| 5
| 2
| 22
| 3
| 14
| 6
| 2
| 0.43
| 1
| 5
| 1
| 0
| 4
| 5
| 4
| 14
| 93
| 13
| 56
| 31
| 45
| 24
| 41
| 25
| 36
| 4
| 1
| 1
| 7
|
5,107
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamVisionNeck
|
from torch import Tensor, nn
from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
class SamVisionNeck(nn.Module):
def __init__(self, config: SamVisionConfig):
super().__init__()
self.config = config
self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False)
self.layer_norm1 = SamLayerNorm(config.output_channels, data_format='channels_first')
self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False)
self.layer_norm2 = SamLayerNorm(config.output_channels, data_format='channels_first')
def forward(self, hidden_states):
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = self.conv1(hidden_states)
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layer_norm2(hidden_states)
return hidden_states
|
class SamVisionNeck(nn.Module):
def __init__(self, config: SamVisionConfig):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 8
| 1
| 7
| 0
| 1
| 0
| 1
| 3
| 2
| 0
| 2
| 5
| 2
| 12
| 18
| 3
| 15
| 8
| 12
| 0
| 15
| 8
| 12
| 1
| 1
| 0
| 2
|
5,108
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/modeling_sam.py
|
transformers.models.sam.modeling_sam.SamVisionSdpaAttention
|
import torch
import torch.nn.functional as F
class SamVisionSdpaAttention(SamVisionAttention):
"""
Multi-head Attention block with relative position embeddings.
Using SDPA instead of the default attention.
"""
def __init__(self, config, window_size):
super().__init__(config, window_size)
def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
if output_attentions:
logger.warning_once('`SamVisionSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states=hidden_states, output_attentions=output_attentions)
batch_size, height, width, _ = hidden_states.shape
qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1).permute(2, 0, 3, 1, 4)
query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)
attn_bias = None
if self.use_rel_pos:
decomposed_rel_pos = self.get_decomposed_rel_pos(query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width))
decomposed_rel_pos = decomposed_rel_pos.reshape(batch_size, self.num_attention_heads, height * width, height * width)
attn_bias = decomposed_rel_pos
query = query.view(batch_size, self.num_attention_heads, height * width, -1)
key = key.view(batch_size, self.num_attention_heads, height * width, -1)
value = value.view(batch_size, self.num_attention_heads, height * width, -1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attn_bias)
attn_output = attn_output.view(batch_size, self.num_attention_heads, height, width, -1).permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return (attn_output, None)
|
class SamVisionSdpaAttention(SamVisionAttention):
'''
Multi-head Attention block with relative position embeddings.
Using SDPA instead of the default attention.
'''
def __init__(self, config, window_size):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
pass
| 3
| 1
| 30
| 3
| 21
| 6
| 2
| 0.34
| 1
| 3
| 0
| 0
| 3
| 0
| 3
| 17
| 99
| 13
| 64
| 27
| 53
| 22
| 43
| 20
| 39
| 5
| 2
| 2
| 7
|
5,109
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/processing_sam.py
|
transformers.models.sam.processing_sam.SamImagesKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
from typing import Optional, Union
from ...image_utils import ImageInput
class SamImagesKwargs(ImagesKwargs):
segmentation_maps: Optional[ImageInput]
input_points: Optional[list[list[float]]]
input_labels: Optional[list[list[int]]]
input_boxes: Optional[list[list[list[float]]]]
point_pad_value: Optional[int]
|
class SamImagesKwargs(ImagesKwargs):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0
| 6
| 1
| 5
| 0
| 6
| 1
| 5
| 0
| 2
| 0
| 0
|
5,110
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/processing_sam.py
|
transformers.models.sam.processing_sam.SamProcessor
|
import numpy as np
from typing import Optional, Union
from ...tokenization_utils_base import AudioInput, BatchEncoding, PreTokenizedInput, TextInput
from ...video_utils import VideoInput
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
from copy import deepcopy
from ...image_utils import ImageInput
class SamProcessor(ProcessorMixin):
"""
    Constructs a SAM processor which wraps a SAM image processor and a 2D points & Bounding boxes processor into a
single processor.
[`SamProcessor`] offers all the functionalities of [`SamImageProcessor`]. See the docstring of
[`~SamImageProcessor.__call__`] for more information.
Args:
image_processor (`SamImageProcessor`):
An instance of [`SamImageProcessor`]. The image processor is a required input.
"""
attributes = ['image_processor']
image_processor_class = 'SamImageProcessor'
def __init__(self, image_processor):
super().__init__(image_processor)
self.target_size = self.image_processor.size['longest_edge']
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio: Optional[AudioInput]=None, video: Optional[VideoInput]=None, **kwargs) -> BatchEncoding:
"""
This method uses [`SamImageProcessor.__call__`] method to prepare image(s) for the model. It also prepares 2D
points and bounding boxes for the model if they are provided.
"""
output_kwargs = self._merge_kwargs(SamProcessorKwargs, tokenizer_init_kwargs={}, **kwargs)
input_points = output_kwargs['images_kwargs'].pop('input_points', None)
input_labels = output_kwargs['images_kwargs'].pop('input_labels', None)
input_boxes = output_kwargs['images_kwargs'].pop('input_boxes', None)
point_pad_value = output_kwargs['images_kwargs'].pop('point_pad_value', None)
encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs'])
original_sizes = encoding_image_processor['original_sizes']
if hasattr(original_sizes, 'numpy'):
original_sizes = original_sizes.numpy()
input_points, input_labels, input_boxes = self._check_and_preprocess_points(input_points=input_points, input_labels=input_labels, input_boxes=input_boxes)
encoding_image_processor = self._normalize_and_convert(encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=output_kwargs['common_kwargs'].get('return_tensors'), point_pad_value=point_pad_value)
return encoding_image_processor
def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors='pt', point_pad_value=-10):
if input_points is not None:
if len(original_sizes) != len(input_points):
input_points = [self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points]
else:
input_points = [self._normalize_coordinates(self.target_size, point, original_size) for point, original_size in zip(input_points, original_sizes)]
if not all((point.shape == input_points[0].shape for point in input_points)):
if input_labels is not None:
input_points, input_labels = self._pad_points_and_labels(input_points, input_labels, point_pad_value)
input_points = np.array(input_points)
if input_labels is not None:
input_labels = np.array(input_labels)
if input_boxes is not None:
if len(original_sizes) != len(input_boxes):
input_boxes = [self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True) for box in input_boxes]
else:
input_boxes = [self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True) for box, original_size in zip(input_boxes, original_sizes)]
input_boxes = np.array(input_boxes)
if input_boxes is not None:
if return_tensors == 'pt':
input_boxes = torch.from_numpy(input_boxes)
input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes})
if input_points is not None:
if return_tensors == 'pt':
input_points = torch.from_numpy(input_points)
input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({'input_points': input_points})
if input_labels is not None:
if return_tensors == 'pt':
input_labels = torch.from_numpy(input_labels)
input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels})
return encoding_image_processor
def _pad_points_and_labels(self, input_points, input_labels, point_pad_value):
"""
The method pads the 2D points and labels to the maximum number of points in the batch.
"""
expected_nb_points = max([point.shape[0] for point in input_points])
processed_input_points = []
for i, point in enumerate(input_points):
if point.shape[0] != expected_nb_points:
point = np.concatenate([point, np.zeros((expected_nb_points - point.shape[0], 2)) + point_pad_value], axis=0)
input_labels[i] = np.append(input_labels[i], [point_pad_value])
processed_input_points.append(point)
input_points = processed_input_points
return (input_points, input_labels)
def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False) -> np.ndarray:
"""
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
coords = deepcopy(coords).astype(float)
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
"""
Check and preprocesses the 2D points, labels and bounding boxes. It checks if the input is valid and if they
are, it converts the coordinates of the points and bounding boxes. If a user passes directly a `torch.Tensor`,
it is converted to a `numpy.ndarray` and then to a `list`.
"""
if input_points is not None:
if hasattr(input_points, 'numpy'):
input_points = input_points.numpy().tolist()
if not isinstance(input_points, list) or not isinstance(input_points[0], list):
raise ValueError('Input points must be a list of list of floating points.')
input_points = [np.array(input_point) for input_point in input_points]
else:
input_points = None
if input_labels is not None:
if hasattr(input_labels, 'numpy'):
input_labels = input_labels.numpy().tolist()
if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
raise ValueError('Input labels must be a list of list integers.')
input_labels = [np.array(label) for label in input_labels]
else:
input_labels = None
if input_boxes is not None:
if hasattr(input_boxes, 'numpy'):
input_boxes = input_boxes.numpy().tolist()
if not isinstance(input_boxes, list) or not isinstance(input_boxes[0], list) or (not isinstance(input_boxes[0][0], list)):
raise ValueError('Input boxes must be a list of list of list of floating points.')
input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
else:
input_boxes = None
return (input_points, input_labels, input_boxes)
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return list(image_processor_input_names + ['original_sizes', 'reshaped_input_sizes'])
def post_process_masks(self, *args, **kwargs):
return self.image_processor.post_process_masks(*args, **kwargs)
|
class SamProcessor(ProcessorMixin):
'''
    Constructs a SAM processor which wraps a SAM image processor and a 2D points & Bounding boxes processor into a
single processor.
[`SamProcessor`] offers all the functionalities of [`SamImageProcessor`]. See the docstring of
[`~SamImageProcessor.__call__`] for more information.
Args:
image_processor (`SamImageProcessor`):
An instance of [`SamImageProcessor`]. The image processor is a required input.
'''
def __init__(self, image_processor):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio: Optional[AudioInput]=None, video: Optional[VideoInput]=None, **kwargs) -> BatchEncoding:
'''
This method uses [`SamImageProcessor.__call__`] method to prepare image(s) for the model. It also prepares 2D
points and bounding boxes for the model if they are provided.
'''
pass
def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors='pt', point_pad_value=-10):
pass
def _pad_points_and_labels(self, input_points, input_labels, point_pad_value):
'''
The method pads the 2D points and labels to the maximum number of points in the batch.
'''
pass
def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False) -> np.ndarray:
'''
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
'''
pass
def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
'''
Check and preprocesses the 2D points, labels and bounding boxes. It checks if the input is valid and if they
are, it converts the coordinates of the points and bounding boxes. If a user passes directly a `torch.Tensor`,
it is converted to a `numpy.ndarray` and then to a `list`.
'''
pass
@property
def model_input_names(self):
pass
def post_process_masks(self, *args, **kwargs):
pass
| 10
| 5
| 28
| 3
| 22
| 4
| 6
| 0.23
| 1
| 10
| 2
| 0
| 8
| 1
| 8
| 25
| 254
| 32
| 183
| 50
| 149
| 42
| 106
| 25
| 97
| 23
| 2
| 3
| 44
|
5,111
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/sam/processing_sam.py
|
transformers.models.sam.processing_sam.SamProcessorKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
class SamProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: SamImagesKwargs
_defaults = {'images_kwargs': {'point_pad_value': -10}}
|
class SamProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0
| 7
| 2
| 6
| 0
| 3
| 2
| 2
| 0
| 3
| 0
| 0
|
5,112
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py
|
transformers.models.seamless_m4t.configuration_seamless_m4t.SeamlessM4TConfig
|
from ...configuration_utils import PretrainedConfig
class SeamlessM4TConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`~SeamlessM4TModel`]. It is used to instantiate an
SeamlessM4T model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SeamlessM4T
["facebook/hf-seamless-m4t-medium"](https://huggingface.co/"facebook/hf-seamless-m4t-medium") architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256102):
Vocabulary size of the SeamlessM4T model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`~SeamlessM4TModel`], [`~SeamlessM4TForTextToSpeech`] or
[`~SeamlessM4TForTextToText`].
t2u_vocab_size (`int`, *optional*, defaults to 10082):
Unit vocabulary size of the SeamlessM4T model. Defines the number of different unit tokens that can be
represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4TModel`],
[`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
> Parameters shared across sub-models
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" layers in the architecture.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model text encoder and decoder might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
encoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the encoders. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the decoders. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
`"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all attention layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all activation layers in the model.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by diving by sqrt(d_model).
> Text encoder and text decoder specific parameters
encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text encoder.
decoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text decoder.
decoder_start_token_id (`int`, *optional*, defaults to 3):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied in the text decoder.
max_new_tokens (`int`, *optional*, defaults to 256):
The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. Only applied to the text-decoder model.
bos_token_id (`int`, *optional*, defaults to 2):
The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
eos_token_id (`int`, *optional*, defaults to 3):
The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
> Speech encoder specific parameters
speech_encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer speech encoder.
speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer speech encoder.
speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
`"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all layers in the speech encoder.
add_adapter (`bool`, *optional*, defaults to `True`):
Add an adapter layer on top of the speech encoder.
speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the speech encoder. See the [LayerDrop paper](see
https://huggingface.co/papers/1909.11556) for more details.
feature_projection_input_dim (`int`, *optional*, defaults to 160):
Input dimension of the input feature projection of the speech encoder, i.e the dimension after processing
input audios with [`SeamlessM4TFeatureExtractor`].
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer of the speech encoder.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer of the speech encoder.
adaptor_kernel_size (`int`, *optional*, defaults to 8):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_stride (`int`, *optional*, defaults to 8):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all layers in the speech adapter.
num_adapter_layers (`int`, *optional*, defaults to 1):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative"`):
Can be specified to `relative` or `rotary` for relative or rotary position embeddings respectively. If left
`None` no relative position embedding is applied. Only applied to the speech encoder.
rotary_embedding_base (`int`, *optional*, defaults to 10000):
If `"rotary"` position embeddings are used, defines the size of the embedding base. Only applied to the
speech encoder.
max_source_positions (`int`, *optional*, defaults to 4096):
if `"relative"` position embeddings are used, defines the maximum source input positions. Only applied to
the speech encoder.
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
> Text-To-Unit (t2u) model specific parameters
t2u_bos_token_id (`int`, *optional*, defaults to 0):
The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_pad_token_id (`int`, *optional*, defaults to 1):
The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_eos_token_id (`int`, *optional*, defaults to 2):
The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_decoder_start_token_id (`int`, *optional*, defaults to 2):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied to the text-to-unit seq2seq model.
t2u_max_new_tokens (`int`, *optional*, defaults to 1024):
The maximum numbers of unit tokens to generate, ignoring the number of tokens in the prompt. Only applied
to the text-to-unit seq2seq model.
t2u_encoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit encoder.
t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
t2u_decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit decoder.
t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
t2u_max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model text-to-unit component might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
> Hifi-Gan Vocoder specific parameters
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*. Applies to the vocoder only.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
the length of *upsample_rates*. Applies to the vocoder only.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
field fusion (MRF) module. Applies to the vocoder only.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
only.
unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
Vocabulary size of the SeamlessM4T vocoder. Defines the number of different unit tokens that can be
represented by the `inputs_ids` passed when calling the vocoder of [`~SeamlessM4TModel`],
[`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
unit_embed_dim (`int`, *optional*, defaults to 1280):
The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
lang_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
spkr_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
vocoder_num_langs (`int`, *optional*, defaults to 36):
Number of langs supported by the vocoder. Might be different from `t2u_num_langs`.
vocoder_num_spkrs (`int`, *optional*, defaults to 200):
Number of speakers supported by the vocoder.
variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the duration predictor. Applies to the vocoder only.
var_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the duration predictor. Applies to the vocoder only.
vocoder_offset (`int`, *optional*, defaults to 4):
Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
```python
>>> from transformers import SeamlessM4TModel, SeamlessM4TConfig
>>> # Initializing a SeamlessM4T "facebook/hf-seamless-m4t-medium" style configuration
>>> configuration = SeamlessM4TConfig()
>>> # Initializing a model from the "facebook/hf-seamless-m4t-medium" style configuration
>>> model = SeamlessM4TModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'seamless_m4t'
def __init__(self, vocab_size=256102, t2u_vocab_size=10082, hidden_size=1024, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, max_position_embeddings=1024, is_encoder_decoder=True, encoder_layerdrop=0.05, decoder_layerdrop=0.05, activation_function='relu', dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, scale_embedding=True, encoder_layers=24, encoder_ffn_dim=8192, encoder_attention_heads=16, decoder_layers=24, decoder_ffn_dim=8192, decoder_attention_heads=16, decoder_start_token_id=3, max_new_tokens=256, pad_token_id=0, bos_token_id=2, eos_token_id=3, speech_encoder_layers=24, speech_encoder_attention_heads=16, speech_encoder_intermediate_size=4096, speech_encoder_hidden_act='swish', speech_encoder_dropout=0.0, add_adapter=True, speech_encoder_layerdrop=0.1, feature_projection_input_dim=160, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, adaptor_kernel_size=8, adaptor_stride=8, adaptor_dropout=0.1, num_adapter_layers=1, position_embeddings_type='relative', rotary_embedding_base=10000, max_source_positions=4096, conv_depthwise_kernel_size=31, t2u_bos_token_id=0, t2u_pad_token_id=1, t2u_eos_token_id=2, t2u_decoder_start_token_id=2, t2u_max_new_tokens=1024, t2u_encoder_layers=6, t2u_encoder_ffn_dim=8192, t2u_encoder_attention_heads=16, t2u_decoder_layers=6, t2u_decoder_ffn_dim=8192, t2u_decoder_attention_heads=16, t2u_max_position_embeddings=2048, sampling_rate=16000, upsample_initial_channel=512, upsample_rates=[5, 4, 4, 2, 2], upsample_kernel_sizes=[11, 8, 8, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_slope=0.1, unit_hifi_gan_vocab_size=10000, unit_embed_dim=1280, lang_embed_dim=256, spkr_embed_dim=256, vocoder_num_langs=36, vocoder_num_spkrs=200, variance_predictor_kernel_size=3, var_pred_dropout=0.5, vocoder_offset=4, **kwargs):
self.vocab_size = vocab_size
self.t2u_vocab_size = t2u_vocab_size
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.max_position_embeddings = max_position_embeddings
self.use_cache = use_cache
self.max_new_tokens = max_new_tokens
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.scale_embedding = scale_embedding
self.num_attention_heads = decoder_attention_heads
self.num_hidden_layers = decoder_layers
self.encoder_layers = encoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
self.speech_encoder_layers = speech_encoder_layers
self.speech_encoder_hidden_act = speech_encoder_hidden_act
self.speech_encoder_dropout = speech_encoder_dropout
self.speech_encoder_attention_heads = speech_encoder_attention_heads
self.speech_encoder_layerdrop = speech_encoder_layerdrop
self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
self.feature_projection_input_dim = feature_projection_input_dim
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.adaptor_kernel_size = adaptor_kernel_size
self.adaptor_stride = adaptor_stride
self.adaptor_dropout = adaptor_dropout
self.num_adapter_layers = num_adapter_layers
self.position_embeddings_type = position_embeddings_type
self.rotary_embedding_base = rotary_embedding_base
self.max_source_positions = max_source_positions
self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
self.add_adapter = add_adapter
self.t2u_bos_token_id = t2u_bos_token_id
self.t2u_pad_token_id = t2u_pad_token_id
self.t2u_eos_token_id = t2u_eos_token_id
self.t2u_decoder_start_token_id = t2u_decoder_start_token_id
self.t2u_max_new_tokens = t2u_max_new_tokens
self.t2u_encoder_layers = t2u_encoder_layers
self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
self.t2u_decoder_layers = t2u_decoder_layers
self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
self.t2u_max_position_embeddings = t2u_max_position_embeddings
self.sampling_rate = sampling_rate
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.leaky_relu_slope = leaky_relu_slope
self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
self.unit_embed_dim = unit_embed_dim
self.lang_embed_dim = lang_embed_dim
self.spkr_embed_dim = spkr_embed_dim
self.vocoder_num_langs = vocoder_num_langs
self.vocoder_num_spkrs = vocoder_num_spkrs
self.variance_predictor_kernel_size = variance_predictor_kernel_size
self.var_pred_dropout = var_pred_dropout
self.vocoder_offset = vocoder_offset
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, max_position_embeddings=max_position_embeddings, **kwargs)
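A minimal sketch of customizing the vocoder parameters documented above (assuming only that the `transformers` package is installed); it checks the stated constraint that `upsample_rates` and `upsample_kernel_sizes` must have the same length:
from transformers import SeamlessM4TConfig

# Hypothetical custom upsampling settings; each stride must pair with one kernel size.
config = SeamlessM4TConfig(
    upsample_rates=[5, 4, 4, 4],
    upsample_kernel_sizes=[11, 8, 8, 8],
)
assert len(config.upsample_rates) == len(config.upsample_kernel_sizes)
print(config.sampling_rate)  # 16000 by default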
|
class SeamlessM4TConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`~SeamlessM4TModel`]. It is used to instantiate an
SeamlessM4T model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SeamlessM4T
["facebook/hf-seamless-m4t-medium"](https://huggingface.co/"facebook/hf-seamless-m4t-medium") architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256102):
Vocabulary size of the SeamlessM4T model. Defines the number of different tokens that can be represented by
the `input_ids` passed when calling [`~SeamlessM4TModel`], [`~SeamlessM4TForTextToSpeech`] or
[`~SeamlessM4TForTextToText`].
t2u_vocab_size (`int`, *optional*, defaults to 10082):
Unit vocabulary size of the SeamlessM4T model. Defines the number of different unit tokens that can be
represented by the `input_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4TModel`],
[`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
> Parameters shared across sub-models
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" layers in the architecture.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model's text encoder and decoder might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
encoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the encoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the decoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
`"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all attention layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all activation layers in the model.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by dividing by sqrt(d_model).
> Text encoder and text decoder specific parameters
encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text encoder.
decoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text decoder.
decoder_start_token_id (`int`, *optional*, defaults to 3):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied in the text decoder.
max_new_tokens (`int`, *optional*, defaults to 256):
The maximum number of text tokens to generate, ignoring the number of tokens in the prompt.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. Only applied to the text-decoder model.
bos_token_id (`int`, *optional*, defaults to 2):
The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
eos_token_id (`int`, *optional*, defaults to 3):
The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
> Speech encoder specific parameters
speech_encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer speech encoder.
speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer speech encoder.
speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
`"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all layers in the speech encoder.
add_adapter (`bool`, *optional*, defaults to `True`):
Add an adapter layer on top of the speech encoder.
speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the speech encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
feature_projection_input_dim (`int`, *optional*, defaults to 160):
Input dimension of the input feature projection of the speech encoder, i.e. the dimension after processing
input audios with [`SeamlessM4TFeatureExtractor`].
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer of the speech encoder.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer of the speech encoder.
adaptor_kernel_size (`int`, *optional*, defaults to 8):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_stride (`int`, *optional*, defaults to 8):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all layers in the speech adapter.
num_adapter_layers (`int`, *optional*, defaults to 1):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative"`):
Can be set to `relative` or `rotary` for relative or rotary position embeddings respectively. If left
`None`, no relative position embedding is applied. Only applied to the speech encoder.
rotary_embedding_base (`int`, *optional*, defaults to 10000):
If `"rotary"` position embeddings are used, defines the size of the embedding base. Only applied to the
speech encoder.
max_source_positions (`int`, *optional*, defaults to 4096):
if `"relative"` position embeddings are used, defines the maximum source input positions. Only applied to
the speech encoder.
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
> Text-To-Unit (t2u) model specific parameters
t2u_bos_token_id (`int`, *optional*, defaults to 0):
The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_pad_token_id (`int`, *optional*, defaults to 1):
The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_eos_token_id (`int`, *optional*, defaults to 2):
The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_decoder_start_token_id (`int`, *optional*, defaults to 2):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied to the text-to-unit seq2seq model.
t2u_max_new_tokens (`int`, *optional*, defaults to 1024):
The maximum number of unit tokens to generate, ignoring the number of tokens in the prompt. Only applied
to the text-to-unit seq2seq model.
t2u_encoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit encoder.
t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
t2u_decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit decoder.
t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
t2u_max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model's text-to-unit component might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
> Hifi-Gan Vocoder specific parameters
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*. Applies to the vocoder only.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
the length of *upsample_rates*. Applies to the vocoder only.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
field fusion (MRF) module. Applies to the vocoder only.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
only.
unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
Vocabulary size of the SeamlessM4T vocoder. Defines the number of different unit tokens that can be
represented by the `input_ids` passed when calling the vocoder of [`~SeamlessM4TModel`],
[`~SeamlessM4TForSpeechToSpeech`] or [`~SeamlessM4TForTextToSpeech`].
unit_embed_dim (`int`, *optional*, defaults to 1280):
The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
lang_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
spkr_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
vocoder_num_langs (`int`, *optional*, defaults to 36):
Number of languages supported by the vocoder. Might be different from `t2u_num_langs`.
vocoder_num_spkrs (`int`, *optional*, defaults to 200):
Number of speakers supported by the vocoder.
variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the duration predictor. Applies to the vocoder only.
var_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the duration predictor. Applies to the vocoder only.
vocoder_offset (`int`, *optional*, defaults to 4):
Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
```python
>>> from transformers import SeamlessM4TModel, SeamlessM4TConfig
>>> # Initializing a SeamlessM4T "facebook/hf-seamless-m4t-medium" style configuration
>>> configuration = SeamlessM4TConfig()
>>> # Initializing a model from the "facebook/hf-seamless-m4t-medium" style configuration
>>> model = SeamlessM4TModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=256102, t2u_vocab_size=10082, hidden_size=1024, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, max_position_embeddings=1024, is_encoder_decoder=True, encoder_layerdrop=0.05, decoder_layerdrop=0.05, activation_function='relu', dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, scale_embedding=True, encoder_layers=24, encoder_ffn_dim=8192, encoder_attention_heads=16, decoder_layers=24, decoder_ffn_dim=8192, decoder_attention_heads=16, decoder_start_token_id=3, max_new_tokens=256, pad_token_id=0, bos_token_id=2, eos_token_id=3, speech_encoder_layers=24, speech_encoder_attention_heads=16, speech_encoder_intermediate_size=4096, speech_encoder_hidden_act='swish', speech_encoder_dropout=0.0, add_adapter=True, speech_encoder_layerdrop=0.1, feature_projection_input_dim=160, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, adaptor_kernel_size=8, adaptor_stride=8, adaptor_dropout=0.1, num_adapter_layers=1, position_embeddings_type='relative', rotary_embedding_base=10000, max_source_positions=4096, conv_depthwise_kernel_size=31, t2u_bos_token_id=0, t2u_pad_token_id=1, t2u_eos_token_id=2, t2u_decoder_start_token_id=2, t2u_max_new_tokens=1024, t2u_encoder_layers=6, t2u_encoder_ffn_dim=8192, t2u_encoder_attention_heads=16, t2u_decoder_layers=6, t2u_decoder_ffn_dim=8192, t2u_decoder_attention_heads=16, t2u_max_position_embeddings=2048, sampling_rate=16000, upsample_initial_channel=512, upsample_rates=[5, 4, 4, 2, 2], upsample_kernel_sizes=[11, 8, 8, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_slope=0.1, unit_hifi_gan_vocab_size=10000, unit_embed_dim=1280, lang_embed_dim=256, spkr_embed_dim=256, vocoder_num_langs=36, vocoder_num_spkrs=200, variance_predictor_kernel_size=3, var_pred_dropout=0.5, vocoder_offset=4, **kwargs):
pass
| 2 | 1 | 174 | 6 | 154 | 14 | 1 | 1.34 | 1 | 1 | 0 | 0 | 1 | 69 | 1 | 1 | 390 | 25 | 156 | 147 | 79 | 209 | 73 | 72 | 71 | 1 | 1 | 0 | 1 |
5,113
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py
|
transformers.models.seamless_m4t.feature_extraction_seamless_m4t.SeamlessM4TFeatureExtractor
|
from typing import Optional, Union
from ...feature_extraction_utils import BatchFeature
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...utils import PaddingStrategy, TensorType, logging
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...utils import is_torch_available
import numpy as np
if is_torch_available():
    import torch
logger = logging.get_logger(__name__)
class SeamlessM4TFeatureExtractor(SequenceFeatureExtractor):
"""
Constructs a SeamlessM4T feature extractor.
This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech.
Args:
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
num_mel_bins (`int`, *optional*, defaults to 80):
Number of Mel-frequency bins.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding vectors.
stride (`int`, *optional*, defaults to 2):
Stride used to reshape audios from shape (batch_size, num_frames, num_mel_bins) to
(batch_size, num_frames // stride, num_mel_bins * stride).
"""
model_input_names = ['input_features', 'attention_mask']
def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, stride=2, **kwargs):
self.num_mel_bins = num_mel_bins
self.return_attention_mask = True
self.stride = stride
mel_filters = mel_filter_bank(num_frequency_bins=257, num_mel_filters=self.num_mel_bins, min_frequency=20, max_frequency=sampling_rate // 2, sampling_rate=sampling_rate, norm=None, mel_scale='kaldi', triangularize_in_mel_space=True)
self.mel_filters = mel_filters
self.window = window_function(400, 'povey', periodic=False)
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
@staticmethod
def zero_mean_unit_var_norm(input_values: list[np.ndarray], attention_mask: list[np.ndarray], padding_value: float=0.0) -> list[np.ndarray]:
"""
Every array in the list is normalized to have zero mean and unit variance
"""
if attention_mask is not None:
attention_mask = np.array(attention_mask, np.int32)
normed_input_values = []
for vector, length in zip(input_values, attention_mask.sum(-1)):
normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-07)
if length < normed_slice.shape[0]:
normed_slice[length:] = padding_value
normed_input_values.append(normed_slice)
else:
normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-07) for x in input_values]
return normed_input_values
def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
"""
Get mel-filter bank features using a Kaldi-compatible spectrogram. The computation mirrors TorchAudio's Kaldi
fbank, which expects 16-bit signed-integer input, so the waveform is scaled by 2**15 internally and should not
be normalized before feature extraction.
"""
if len(waveform.shape) == 2:
waveform = waveform[0]
waveform = np.squeeze(waveform) * 2 ** 15
features = spectrogram(waveform, self.window, frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, preemphasis=0.97, mel_filters=self.mel_filters, log_mel='log', mel_floor=1.192092955078125e-07, remove_dc_offset=True).T
return features
def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, max_length: Optional[int]=None, truncation: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None, return_attention_mask: Optional[bool]=None, do_normalize_per_mel_bins: Optional[bool]=True, **kwargs) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `torch.Tensor`, `list[float]`, `list[np.ndarray]`, `list[torch.Tensor]`,
`list[list[float]]`, `list[list[list[float]]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array,
a torch tensor, a list of float values, a list of numpy arrays, a list of torch tensors,
a list of list of float values or a list of a list of list of float values.
If `raw_speech` is a one-dimensional `np.ndarray`, `torch.Tensor` or a `list[float]`, `raw_speech` is
considered a single-channel, single-sample sound. In all other cases, the first dimension of
`raw_speech`, whether from an `np.ndarray`, a `torch.Tensor` or a `list[...]`,
corresponds to the number of samples in the batch, and the number of channels
(i.e. mono or stereo character) is derived from the other dimensions
(1D -> single-channel waveform batches; 2D-> stereo-channel waveform batches).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*, defaults to 2):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
For SeamlessM4T models, `attention_mask` should always be passed for batched inference, to avoid subtle
bugs.
</Tip>
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
do_normalize_per_mel_bins (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean unit-variance normalize the input per mel-channel.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to the tokenizer or the feature
extractor.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.')
return_attention_mask = return_attention_mask if return_attention_mask is not None else self.return_attention_mask
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 3:
raise ValueError(f'Only mono-channel or stereo-channel audio is supported for input to {self}')
acceptable_types = (torch.Tensor, np.ndarray, tuple, list) if is_torch_available() else (np.ndarray, tuple, list)
is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], acceptable_types))
if is_batched:
raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
elif not is_batched and (not isinstance(raw_speech, np.ndarray)):
raw_speech = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
if not is_batched:
raw_speech = [raw_speech]
features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
if do_normalize_per_mel_bins:
features = [(x - np.expand_dims(x.mean(0), 0)) / np.sqrt(np.expand_dims(x.var(0, ddof=1), 0) + 1e-07) for x in features]
encoded_inputs = BatchFeature({'input_features': features})
padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, return_tensors='np')
input_features = padded_inputs.get('input_features')
attention_mask = padded_inputs.pop('attention_mask')
batch_size, num_frames, num_channels = input_features.shape
remainder = num_frames % self.stride
if remainder != 0:
input_features = input_features[:, :num_frames - remainder, :]
attention_mask = attention_mask[:, :num_frames - remainder]
input_features = np.reshape(input_features, (batch_size, num_frames // self.stride, num_channels * self.stride))
indices = np.arange(0, num_frames - remainder)
attention_mask = attention_mask[:, indices % self.stride == 1]
padded_inputs['input_features'] = input_features
if return_attention_mask:
padded_inputs['attention_mask'] = attention_mask
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
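A short usage sketch for the feature extractor above (the input values are synthetic; only the shapes matter). With the default `stride=2`, pairs of frames are merged, so the feature dimension becomes `num_mel_bins * stride = 160`:
import numpy as np
from transformers import SeamlessM4TFeatureExtractor

extractor = SeamlessM4TFeatureExtractor()
speech = np.random.randn(32000).astype(np.float32)  # ~2 s of audio at 16 kHz
inputs = extractor(speech, sampling_rate=16000, return_tensors="np")
print(inputs["input_features"].shape)  # (1, num_frames // 2, 160)
print(inputs["attention_mask"].shape)  # (1, num_frames // 2)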
|
class SeamlessM4TFeatureExtractor(SequenceFeatureExtractor):
'''
Constructs a SeamlessM4T feature extractor.
This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech.
Args:
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
num_mel_bins (`int`, *optional*, defaults to 80):
Number of Mel-frequency bins.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding vectors.
stride (`int`, *optional*, defaults to 2):
Stride used to reshape audios from shape (batch_size, num_frames, num_mel_bins) to
(batch_size, num_frames // stride, num_mel_bins * stride).
'''
def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, stride=2, **kwargs):
pass
@staticmethod
def zero_mean_unit_var_norm(input_values: list[np.ndarray], attention_mask: list[np.ndarray], padding_value: float=0.0) -> list[np.ndarray]:
'''
Every array in the list is normalized to have zero mean and unit variance
'''
pass
def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
'''
Get mel-filter bank features using a Kaldi-compatible spectrogram. The computation mirrors TorchAudio's Kaldi
fbank, which expects 16-bit signed-integer input, so the waveform is scaled by 2**15 internally and should not
be normalized before feature extraction.
'''
pass
def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, max_length: Optional[int]=None, truncation: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None, return_attention_mask: Optional[bool]=None, do_normalize_per_mel_bins: Optional[bool]=True, **kwargs) -> BatchFeature:
'''
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `torch.Tensor`, `list[float]`, `list[np.ndarray]`, `list[torch.Tensor]`,
`list[list[float]]`, `list[list[list[float]]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array,
a torch tensor, a list of float values, a list of numpy arrays, a list of torch tensors,
a list of list of float values or a list of a list of list of float values.
If `raw_speech` is a one-dimensional `np.ndarray`, `torch.Tensor` or a `list[float]`, `raw_speech` is
considered a single-channel, single-sample sound. In all other cases, the first dimension of
`raw_speech`, whether from an `np.ndarray`, a `torch.Tensor` or a `list[...]`,
corresponds to the number of samples in the batch, and the number of channels
(i.e. mono or stereo character) is derived from the other dimensions
(1D -> single-channel waveform batches; 2D-> stereo-channel waveform batches).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*, defaults to 2):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
For SeamlessM4T models, `attention_mask` should always be passed for batched inference, to avoid subtle
bugs.
</Tip>
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
do_normalize_per_mel_bins (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean unit-variance normalize the input per mel-channel.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to the tokenizer or the feature
extractor.
'''
pass
| 6 | 4 | 60 | 8 | 35 | 17 | 5 | 0.6 | 1 | 11 | 1 | 0 | 3 | 5 | 4 | 21 | 269 | 41 | 143 | 53 | 112 | 86 | 65 | 27 | 60 | 14 | 3 | 3 | 21 |
5,114
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TAttention
|
from torch import Tensor, nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
from ...utils.deprecation import deprecate_kwarg
from ...utils import logging
logger = logging.get_logger(__name__)
class SeamlessM4TAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[SeamlessM4TConfig]=None, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = encoder_hidden_states is not None
bsz, tgt_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
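A standalone shape check for the attention module above; this assumes direct access to the internal class via its module path, which is not part of the public API:
import torch
from transformers.models.seamless_m4t.modeling_seamless_m4t import SeamlessM4TAttention

attn = SeamlessM4TAttention(embed_dim=1024, num_heads=16)
hidden_states = torch.randn(2, 10, 1024)  # (batch, time, channel), per the forward docstring
attn_output, attn_weights = attn(hidden_states, output_attentions=True)
print(attn_output.shape)   # torch.Size([2, 10, 1024])
print(attn_weights.shape)  # torch.Size([2, 16, 10, 10])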
|
class SeamlessM4TAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[SeamlessM4TConfig]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4 | 2 | 47 | 6 | 32 | 8 | 4 | 0.28 | 1 | 7 | 1 | 0 | 3 | 12 | 3 | 13 | 147 | 22 | 98 | 43 | 78 | 27 | 63 | 27 | 59 | 10 | 1 | 2 | 13 |
5,115
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan
|
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_utils import PreTrainedModel
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
logger = logging.get_logger(__name__)
@auto_docstring(custom_intro='\n Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).\n ')
class SeamlessM4TCodeHifiGan(PreTrainedModel):
config: SeamlessM4TConfig
main_input_name = 'input_embeds'
_no_split_modules = []
def __init__(self, config):
super().__init__(config)
self.pad_token_id = config.t2u_pad_token_id
self.dur_predictor = SeamlessM4TVariancePredictor(config)
self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim)
self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim)
self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim)
self.hifi_gan = SeamlessM4THifiGan(config)
self.post_init()
def _get_dur_output_lengths(self, input_ids, dur_out):
"""
Computes the output length after the duration layer.
"""
unit_lengths = (input_ids != self.pad_token_id).sum(1)
unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1)
cumulative_dur_out = torch.cumsum(dur_out, dim=1)
unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze()
return unit_lengths
def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the hifigan convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
return torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode='floor') + 1
def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1
input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
for i, (upsample_rate, kernel_size) in enumerate(zip(self.config.upsample_rates, self.config.upsample_kernel_sizes)):
input_lengths = _transpose_conv_out_length(input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2)
for i in range(len(self.config.upsample_rates)):
for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes):
for dil in dilation:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil)
for dil in dilation:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1)
input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
return input_lengths
def forward(self, input_ids: torch.LongTensor, spkr_id: torch.Tensor, lang_id: torch.Tensor) -> tuple[torch.Tensor]:
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTextToUnitForConditionalGeneration`]. [What are input
IDs?](../glossary#input-ids)
spkr_id (`int`, *optional*):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
lang_id (`torch.Tensor`, *optional*):
The language id to use as target language for translation. Must be lower than `config.vocoder_num_langs`.
"""
hidden_states = self.unit_embedding(input_ids).transpose(1, 2)
spkr = self.speaker_embedding(spkr_id).transpose(1, 2)
lang = self.language_embedding(lang_id).transpose(1, 2)
log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
if hidden_states.size(0) == 1:
hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
else:
if hidden_states.shape[0] > 1 and self.training:
logger.warning('`self.training=True` and you use batching. You lose parallelism during the hifigan\n forward pass because the samples are interleaved.')
hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1) for hidden_state, duration in zip(hidden_states, dur_out)]
hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)
spkr = spkr.repeat(1, 1, hidden_states.shape[-1])
lang = lang.repeat(1, 1, hidden_states.shape[-1])
hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)
hidden_states = self.hifi_gan(hidden_states)
unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)
lengths = self._get_output_hifigan_lengths(unit_lengths)
return (hidden_states, lengths)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, 'weight_norm'):
weight_norm = nn.utils.parametrizations.weight_norm
weight_norm(self.hifi_gan.conv_pre)
for layer in self.hifi_gan.upsampler:
weight_norm(layer)
for layer in self.hifi_gan.resblocks:
layer.apply_weight_norm()
weight_norm(self.hifi_gan.conv_post)
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.hifi_gan.conv_pre)
for layer in self.hifi_gan.upsampler:
nn.utils.remove_weight_norm(layer)
for layer in self.hifi_gan.resblocks:
layer.remove_weight_norm()
nn.utils.remove_weight_norm(self.hifi_gan.conv_post)
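A toy illustration of the duration-based upsampling performed in `forward` above (pure tensor ops, not the model itself): each unit embedding is repeated along the time axis according to its predicted duration, recovered from the log-duration with `expm1` and clamped to at least 1:
import torch

hidden_states = torch.arange(4.0).view(1, 1, 4)      # (batch, channels, units)
log_dur_pred = torch.tensor([[0.0, 0.7, 1.1, 0.0]])  # hypothetical predictor output
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
upsampled = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
print(dur_out)          # tensor([[1, 1, 2, 1]])
print(upsampled.shape)  # torch.Size([1, 1, 5])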
|
@auto_docstring(custom_intro='\n Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).\n ')
class SeamlessM4TCodeHifiGan(PreTrainedModel):
def __init__(self, config):
pass
def _get_dur_output_lengths(self, input_ids, dur_out):
'''
Computes the output length after the duration layer.
'''
pass
def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
'''
Computes the output length of the hifigan convolutional layers
'''
pass
def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
pass
def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
pass
def forward(self, input_ids: torch.LongTensor, spkr_id: torch.Tensor, lang_id: torch.Tensor) -> tuple[torch.Tensor]:
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTextToUnitForConditionalGeneration`]. [What are input
IDs?](../glossary#input-ids)
spkr_id (`int`, *optional*):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
lang_id (`torch.Tensor`, *optional*):
The language id to use as target language for translation. Must be lower than `config.vocoder_num_langs`.
'''
pass
def _init_weights(self, module: nn.Module):
'''Initialize the weights.'''
pass
def apply_weight_norm(self):
pass
def remove_weight_norm(self):
pass
| 11 | 4 | 17 | 3 | 11 | 3 | 3 | 0.29 | 1 | 8 | 2 | 0 | 7 | 6 | 7 | 7 | 155 | 30 | 97 | 36 | 85 | 28 | 79 | 34 | 69 | 6 | 1 | 3 | 25 |
5,116
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapter
|
from torch import Tensor, nn
class SeamlessM4TConformerAdapter(nn.Module):
def __init__(self, config):
super().__init__()
self.layers = nn.ModuleList((SeamlessM4TConformerAdapterLayer(config) for _ in range(config.num_adapter_layers)))
def forward(self, hidden_states, attention_mask):
for layer in self.layers:
hidden_states = layer(hidden_states, attention_mask)
return hidden_states
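A small aside on the construction above: `nn.ModuleList` accepts any iterable of modules, so the generator expression is consumed eagerly and every layer is registered, exactly as with a list comprehension:
from torch import nn

layers = nn.ModuleList(nn.Linear(4, 4) for _ in range(3))
print(len(layers))  # 3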
|
class SeamlessM4TConformerAdapter(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask):
pass
| 3 | 0 | 6 | 2 | 4 | 1 | 2 | 0.13 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 12 | 13 | 4 | 8 | 5 | 5 | 1 | 8 | 5 | 5 | 2 | 1 | 1 | 3 |
5,117
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapterLayer
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
import torch
from typing import Optional, Union
from torch import Tensor, nn
class SeamlessM4TConformerAdapterLayer(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
dropout = config.adaptor_dropout
self.kernel_size = config.adaptor_kernel_size
self.stride = config.adaptor_stride
self.residual_layer_norm = nn.LayerNorm(embed_dim)
self.residual_conv = nn.Conv1d(embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2)
self.activation = nn.GLU(dim=1)
self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
self.self_attn_conv = nn.Conv1d(embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2)
self.self_attn = SeamlessM4TConformerSelfAttention(config, use_position_embeddings=False)
self.self_attn_dropout = nn.Dropout(dropout)
self.ffn_layer_norm = nn.LayerNorm(embed_dim)
self.ffn = SeamlessM4TConformerFeedForward(config, act_fn='relu', dropout=dropout)
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pad = self.kernel_size // 2
seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
seq_lens = (seq_lens + 2 * pad - self.kernel_size) / self.stride + 1
return seq_lens.floor()
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False):
residual = self.residual_layer_norm(hidden_states)
residual = residual.transpose(1, 2)
residual = self.residual_conv(residual)
residual = self.activation(residual)
residual = residual.transpose(1, 2)
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.self_attn_conv(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(hidden_states.device)
attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths)
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
hidden_states, attn_weights = self.self_attn(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.self_attn_dropout(hidden_states)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states) + residual
return hidden_states
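A quick numeric check of `_compute_sub_sample_lengths_from_attention_mask` above, using the default `adaptor_kernel_size=8` and `adaptor_stride=8` (so `pad = 4`); it is the standard 1D-convolution output-length formula:
import torch

kernel_size, stride = 8, 8
pad = kernel_size // 2
seq_lens = torch.tensor([160.0, 157.0])
out = (seq_lens + 2 * pad - kernel_size) / stride + 1
print(out.floor())  # tensor([21., 20.])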
|
class SeamlessM4TConformerAdapterLayer(nn.Module):
def __init__(self, config):
pass
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pass
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False):
pass
| 4 | 0 | 31 | 4 | 23 | 4 | 1 | 0.17 | 1 | 5 | 2 | 0 | 3 | 11 | 3 | 13 | 96 | 15 | 69 | 27 | 60 | 12 | 43 | 22 | 39 | 2 | 1 | 1 | 4 |
5,118
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerConvolutionModule
|
from torch import Tensor, nn
from ...activations import ACT2FN
class SeamlessM4TConformerConvolutionModule(nn.Module):
"""Convolution block used in the conformer block"""
def __init__(self, config):
super().__init__()
if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding")
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.pointwise_conv1 = nn.Conv1d(config.hidden_size, 2 * config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False)
self.glu = nn.GLU(dim=1)
self.depthwise_conv = nn.Conv1d(config.hidden_size, config.hidden_size, config.conv_depthwise_kernel_size, stride=1, padding='same', groups=config.hidden_size, bias=False)
self.batch_norm = nn.BatchNorm1d(config.hidden_size)
self.activation = ACT2FN[config.speech_encoder_hidden_act]
self.pointwise_conv2 = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False)
self.dropout = nn.Dropout(config.speech_encoder_dropout)
def forward(self, hidden_states, attention_mask=None):
hidden_states = self.layer_norm(hidden_states)
if attention_mask is not None:
hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.pointwise_conv1(hidden_states)
hidden_states = self.glu(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.batch_norm(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.pointwise_conv2(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
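A shape check for the pointwise-conv-plus-GLU pattern above: the first pointwise convolution doubles the channels, and `nn.GLU(dim=1)` halves them again by gating, out = a * sigmoid(b):
import torch
from torch import nn

glu = nn.GLU(dim=1)
x = torch.randn(1, 2 * 1024, 50)  # (batch, 2 * hidden_size, time)
print(glu(x).shape)               # torch.Size([1, 1024, 50])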
|
class SeamlessM4TConformerConvolutionModule(nn.Module):
'''Convolution block used in the conformer block'''
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None):
pass
| 3
| 1
| 30
| 3
| 24
| 4
| 2
| 0.16
| 1
| 2
| 0
| 0
| 2
| 8
| 2
| 12
| 64
| 7
| 49
| 11
| 46
| 8
| 27
| 11
| 24
| 2
| 1
| 1
| 4
|
5,119
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerEncoder
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
import torch
from ...integrations.fsdp import is_fsdp_managed_module
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
class SeamlessM4TConformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if config.position_embeddings_type == 'relative':
self.embed_positions = SeamlessM4TConformerRelPositionalEmbedding(config)
elif config.position_embeddings_type == 'rotary':
self.embed_positions = SeamlessM4TConformerRotaryPositionalEmbedding(config)
else:
self.embed_positions = None
self.dropout = nn.Dropout(config.speech_encoder_dropout)
self.layers = nn.ModuleList([SeamlessM4TConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)])
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
conv_attention_mask = attention_mask
if attention_mask is not None:
hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
hidden_states = self.dropout(hidden_states)
if self.embed_positions is not None:
relative_position_embeddings = self.embed_positions(hidden_states)
else:
relative_position_embeddings = None
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.speech_encoder_layerdrop
if not skip_the_layer or synced_gpus:
layer_outputs = layer(hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
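The mask handling above converts a (batch, seq) padding mask into the additive 4-D form consumed by the attention layers. Restated as a standalone helper (the function name is illustrative):
import torch

def to_additive_attention_mask(padding_mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # 0 where attention is allowed, a very large negative number where it is masked,
    # broadcast to (batch, 1, seq, seq) so it can simply be added to attention scores.
    inverted = 1.0 - padding_mask[:, None, None, :].to(dtype)
    additive = inverted * torch.finfo(dtype).min
    return additive.expand(padding_mask.shape[0], 1, padding_mask.shape[-1], padding_mask.shape[-1])

mask = torch.tensor([[1, 1, 1, 0]])
print(to_additive_attention_mask(mask)[0, 0, 0])   # last entry is about -3.4e38, the rest are 0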
|
class SeamlessM4TConformerEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
pass
| 3
| 0
| 49
| 8
| 40
| 2
| 9
| 0.05
| 1
| 8
| 4
| 0
| 2
| 6
| 2
| 12
| 100
| 16
| 80
| 26
| 70
| 4
| 46
| 18
| 43
| 14
| 1
| 3
| 17
|
5,120
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerEncoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
from torch import Tensor, nn
class SeamlessM4TConformerEncoderLayer(GradientCheckpointingLayer):
"""Conformer block based on https://huggingface.co/papers/2005.08100."""
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
dropout = config.speech_encoder_dropout
self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
self.ffn1 = SeamlessM4TConformerFeedForward(config)
self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
self.self_attn_dropout = nn.Dropout(dropout)
self.self_attn = SeamlessM4TConformerSelfAttention(config)
self.conv_module = SeamlessM4TConformerConvolutionModule(config)
self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
self.ffn2 = SeamlessM4TConformerFeedForward(config)
self.final_layer_norm = nn.LayerNorm(embed_dim)
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, relative_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False, conv_attention_mask: Optional[torch.Tensor]=None):
hidden_states = hidden_states
residual = hidden_states
hidden_states = self.ffn1_layer_norm(hidden_states)
hidden_states = self.ffn1(hidden_states)
hidden_states = hidden_states * 0.5 + residual
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions)
hidden_states = self.self_attn_dropout(hidden_states)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn2_layer_norm(hidden_states)
hidden_states = self.ffn2(hidden_states)
hidden_states = hidden_states * 0.5 + residual
hidden_states = self.final_layer_norm(hidden_states)
return (hidden_states, attn_weights)
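Reading off the forward pass above, the layer follows the Macaron-style sandwich of the Conformer paper, with each feed-forward block contributing through a half-step residual (the pre-layer-norms are omitted for brevity):
\[
\tilde{x} = x + \tfrac{1}{2}\,\mathrm{FFN}_1(x), \qquad
x' = \tilde{x} + \mathrm{MHSA}(\tilde{x}), \qquad
x'' = x' + \mathrm{Conv}(x'), \qquad
y = \mathrm{LayerNorm}\bigl(x'' + \tfrac{1}{2}\,\mathrm{FFN}_2(x'')\bigr)
\]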
|
class SeamlessM4TConformerEncoderLayer(GradientCheckpointingLayer):
'''Conformer block based on https://huggingface.co/papers/2005.08100.'''
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, relative_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False, conv_attention_mask: Optional[torch.Tensor]=None):
pass
| 3
| 1
| 31
| 5
| 23
| 4
| 1
| 0.22
| 1
| 6
| 3
| 0
| 2
| 9
| 2
| 12
| 67
| 11
| 46
| 23
| 36
| 10
| 34
| 16
| 31
| 1
| 1
| 0
| 2
|
5,121
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeatureProjection
|
from torch import Tensor, nn
class SeamlessM4TConformerFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps)
self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size)
self.dropout = nn.Dropout(config.speech_encoder_dropout)
def forward(self, hidden_states):
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class SeamlessM4TConformerFeatureProjection(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.09
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 13
| 1
| 11
| 7
| 8
| 1
| 11
| 7
| 8
| 1
| 1
| 0
| 2
|
5,122
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeedForward
|
from torch import Tensor, nn
from ...activations import ACT2FN
class SeamlessM4TConformerFeedForward(nn.Module):
def __init__(self, config, act_fn=None, dropout=None):
super().__init__()
dropout = dropout if dropout is not None else config.speech_encoder_dropout
act_fn = act_fn if act_fn is not None else config.speech_encoder_hidden_act
self.intermediate_dropout = nn.Dropout(dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size)
self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
|
class SeamlessM4TConformerFeedForward(nn.Module):
def __init__(self, config, act_fn=None, dropout=None):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 10
| 2
| 8
| 0
| 3
| 0
| 1
| 2
| 0
| 0
| 2
| 5
| 2
| 12
| 21
| 4
| 17
| 8
| 14
| 0
| 17
| 8
| 14
| 4
| 1
| 0
| 5
|
5,123
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerPositionalConvEmbedding
|
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from torch import Tensor, nn
from ...activations import ACT2FN
class SeamlessM4TConformerPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, 'weight_norm'):
weight_norm = nn.utils.parametrizations.weight_norm
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = weight_norm(self.conv, name='weight', dim=2)
if hasattr(self.conv, 'parametrizations'):
weight_g = self.conv.parametrizations.weight.original0
weight_v = self.conv.parametrizations.weight.original1
else:
weight_g = self.conv.weight_g
weight_v = self.conv.weight_v
deepspeed.zero.register_external_parameter(self, weight_v)
deepspeed.zero.register_external_parameter(self, weight_g)
else:
self.conv = weight_norm(self.conv, name='weight', dim=2)
self.padding = SeamlessM4TConformerSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.speech_encoder_hidden_act]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
|
class SeamlessM4TConformerPositionalConvEmbedding(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 21
| 3
| 18
| 0
| 3
| 0
| 1
| 2
| 1
| 0
| 2
| 3
| 2
| 12
| 43
| 7
| 36
| 10
| 32
| 0
| 28
| 10
| 24
| 4
| 1
| 2
| 5
|
5,124
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerRelPositionalEmbedding
|
import math
import torch
from torch import Tensor, nn
class SeamlessM4TConformerRelPositionalEmbedding(nn.Module):
"""Relative positional encoding module."""
def __init__(self, config):
super().__init__()
self.max_len = config.max_source_positions
self.d_model = config.hidden_size
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
def extend_pe(self, x):
if self.pe is not None:
if self.pe.size(1) >= x.size(1) * 2 - 1:
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
pe_positive = torch.zeros(x.size(1), self.d_model)
pe_negative = torch.zeros(x.size(1), self.d_model)
position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1)
div_term = torch.exp(torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model))
pe_positive[:, 0::2] = torch.sin(position * div_term)
pe_positive[:, 1::2] = torch.cos(position * div_term)
pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
pe_negative = pe_negative[1:].unsqueeze(0)
pe = torch.cat([pe_positive, pe_negative], dim=1)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, hidden_states: torch.Tensor):
self.extend_pe(hidden_states)
start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
relative_position_embeddings = self.pe[:, start_idx:end_idx]
return relative_position_embeddings
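The table built in `extend_pe` is the standard sinusoidal encoding, evaluated at both positive and negative offsets so that `forward` can slice out the 2T-1 embeddings centred on relative offset 0 for a length-T input:
\[
\mathrm{PE}_{(p,\,2i)} = \sin\!\left(\frac{p}{10000^{2i/d}}\right), \qquad
\mathrm{PE}_{(p,\,2i+1)} = \cos\!\left(\frac{p}{10000^{2i/d}}\right), \qquad
p = -(T-1), \dots, T-1
\]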
|
class SeamlessM4TConformerRelPositionalEmbedding(nn.Module):
'''Relative positional encoding module.'''
def __init__(self, config):
pass
def extend_pe(self, x):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 4
| 1
| 14
| 1
| 11
| 3
| 2
| 0.3
| 1
| 2
| 0
| 0
| 3
| 3
| 3
| 13
| 48
| 5
| 33
| 15
| 29
| 10
| 31
| 15
| 27
| 4
| 1
| 3
| 6
|
5,125
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerRotaryPositionalEmbedding
|
import torch
from torch import Tensor, nn
class SeamlessM4TConformerRotaryPositionalEmbedding(nn.Module):
"""Rotary positional embedding
Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://huggingface.co/papers/2104.09864
"""
def __init__(self, config):
super().__init__()
dim = config.hidden_size // config.speech_encoder_attention_heads
base = config.rotary_embedding_base
inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)
self.register_buffer('inv_freq', inv_freq)
self.cached_sequence_length = None
self.cached_rotary_positional_embedding = None
def forward(self, hidden_states):
sequence_length = hidden_states.shape[1]
if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
return self.cached_rotary_positional_embedding
self.cached_sequence_length = sequence_length
time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
freqs = torch.einsum('i,j->ij', time_stamps, self.inv_freq)
embeddings = torch.cat((freqs, freqs), dim=-1)
cos_embeddings = embeddings.cos()[:, None, None, :]
sin_embeddings = embeddings.sin()[:, None, None, :]
self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings]).type_as(hidden_states)
return self.cached_rotary_positional_embedding
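The module above only caches the cos/sin tables; they are consumed by `_apply_rotary_embedding` in `SeamlessM4TConformerSelfAttention` further down in this file. A standalone sketch of that application step, with toy shapes and an illustrative helper name:
import torch

def apply_rotary(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # The usual rotary update: x * cos + rotate_half(x) * sin, applied per position.
    half = x.shape[-1] // 2
    rotated = torch.cat((-x[..., half:], x[..., :half]), dim=-1)
    return x * cos + rotated * sin

# (seq, batch, heads, head_dim), matching the transpose used by the attention module
x = torch.randn(6, 2, 4, 8)
inv_freq = 1.0 / 10000 ** (torch.arange(0, 8, 2, dtype=torch.float32) / 8)
angles = torch.outer(torch.arange(6, dtype=torch.float32), inv_freq)   # (seq, head_dim / 2)
emb = torch.cat((angles, angles), dim=-1)                              # (seq, head_dim)
cos, sin = emb.cos()[:, None, None, :], emb.sin()[:, None, None, :]
print(apply_rotary(x, cos, sin).shape)    # torch.Size([6, 2, 4, 8])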
|
class SeamlessM4TConformerRotaryPositionalEmbedding(nn.Module):
'''Rotary positional embedding
Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://huggingface.co/papers/2104.09864
'''
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 1
| 13
| 2
| 10
| 1
| 2
| 0.24
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 32
| 6
| 21
| 14
| 18
| 5
| 21
| 14
| 18
| 2
| 1
| 1
| 3
|
5,126
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerSamePadLayer
|
from torch import Tensor, nn
class SeamlessM4TConformerSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, :-self.num_pad_remove]
return hidden_states
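A quick check of the arithmetic this layer compensates for: with an even kernel k and symmetric padding k // 2, a stride-1 Conv1d returns L + 1 frames for an input of length L, so trimming the last frame restores the original length (the kernel size and channel count below are arbitrary illustrative values).
import torch
from torch import nn

conv = nn.Conv1d(4, 4, kernel_size=128, padding=64)   # even kernel, padding = k // 2
x = torch.randn(1, 4, 50)
print(conv(x).shape[-1])              # 51 -> one frame too many
print(conv(x)[:, :, :-1].shape[-1])   # 50 -> matches the input length again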
|
class SeamlessM4TConformerSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 2
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 9
| 1
| 8
| 4
| 5
| 0
| 8
| 4
| 5
| 2
| 1
| 1
| 4
|
5,127
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerSelfAttention
|
import math
import torch
from typing import Optional, Union
from torch import Tensor, nn
class SeamlessM4TConformerSelfAttention(nn.Module):
"""Construct a SeamlessM4TConformerSelfAttention object.
Can be enhanced with rotary or relative position embeddings.
"""
def __init__(self, config, use_position_embeddings=True):
super().__init__()
self.head_size = config.hidden_size // config.speech_encoder_attention_heads
self.num_heads = config.speech_encoder_attention_heads
self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None
self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(p=config.speech_encoder_dropout)
if self.position_embeddings_type == 'relative':
self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, relative_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
batch_size, sequence_length, hidden_size = hidden_states.size()
query_key_states = hidden_states
value_states = hidden_states
if self.position_embeddings_type == 'rotary':
if relative_position_embeddings is None:
raise ValueError("`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'`")
query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
if self.position_embeddings_type == 'relative':
if relative_position_embeddings is None:
raise ValueError("`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'relative'`")
scores = self._apply_relative_embeddings(query=query, key=key, relative_position_embeddings=relative_position_embeddings)
else:
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
if attention_mask is not None:
scores = scores + attention_mask
probs = torch.softmax(scores, dim=-1)
probs = self.dropout(probs)
hidden_states = torch.matmul(probs, value)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
hidden_states = self.linear_out(hidden_states)
return (hidden_states, probs)
def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
batch_size, sequence_length, hidden_size = hidden_states.size()
hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
cos = relative_position_embeddings[0, :sequence_length, ...]
sin = relative_position_embeddings[1, :sequence_length, ...]
hidden_states = hidden_states.transpose(0, 1)
rotated_states_begin = hidden_states[..., :self.head_size // 2]
rotated_states_end = hidden_states[..., self.head_size // 2:]
rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
hidden_states = hidden_states * cos + rotated_states * sin
hidden_states = hidden_states.transpose(0, 1)
hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
return hidden_states
def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
proj_relative_position_embeddings = proj_relative_position_embeddings.view(relative_position_embeddings.size(0), -1, self.num_heads, self.head_size)
proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)
query = query.transpose(1, 2)
q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)
zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
scores_bd = scores_bd[:, :, :, :scores_bd.size(-1) // 2 + 1]
scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)
return scores
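`_apply_relative_embeddings` implements Transformer-XL style scoring: `pos_bias_u` / `pos_bias_v` are the learned global biases u and v, r_{i-j} is the projected relative positional embedding, and the zero-pad-and-reshape steps are the usual relative-shift trick that aligns each query with its own offsets:
\[
\mathrm{score}_{ij} \;=\; \frac{(q_i + u)^{\top} k_j \;+\; (q_i + v)^{\top} r_{\,i-j}}{\sqrt{d_{\text{head}}}}
\]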
|
class SeamlessM4TConformerSelfAttention(nn.Module):
'''Construct a SeamlessM4TConformerSelfAttention object.
Can be enhanced with rotary or relative position embeddings.
'''
def __init__(self, config, use_position_embeddings=True):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, relative_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
pass
def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
pass
| 5
| 1
| 35
| 6
| 22
| 7
| 3
| 0.36
| 1
| 4
| 0
| 0
| 4
| 11
| 4
| 14
| 149
| 28
| 89
| 45
| 78
| 32
| 73
| 39
| 68
| 6
| 1
| 2
| 11
|
5,128
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TDecoder
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
import math
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
@auto_docstring(custom_intro='\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4TDecoderLayer`].\n ')
class SeamlessM4TDecoder(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig, embed_tokens: Optional[nn.Embedding]=None):
"""
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
"""
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = SeamlessM4TScaledWordEmbedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx, embed_scale=embed_scale)
self.embed_tokens.weight = embed_tokens.weight
else:
self.embed_tokens = SeamlessM4TScaledWordEmbedding(self.vocab_size, config.hidden_size, self.padding_idx, embed_scale=embed_scale)
self.embed_positions = SeamlessM4TSinusoidalPositionalEmbedding(self.max_target_positions, config.hidden_size, padding_idx=self.padding_idx)
layers = []
for i in range(config.decoder_layers):
layers.append(SeamlessM4TDecoderLayer(config, decoder_attention_heads=config.decoder_attention_heads, decoder_ffn_dim=config.decoder_ffn_dim, layer_idx=i))
self.layers = nn.ModuleList(layers)
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_shape = input.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
positions = self.embed_positions(input, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
|
@auto_docstring(custom_intro='\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4TDecoderLayer`].\n ')
class SeamlessM4TDecoder(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig, embed_tokens: Optional[nn.Embedding]=None):
'''
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
pass
| 5
| 1
| 59
| 8
| 37
| 14
| 9
| 0.36
| 1
| 12
| 5
| 0
| 4
| 10
| 4
| 7
| 238
| 34
| 150
| 47
| 129
| 54
| 80
| 31
| 75
| 29
| 2
| 3
| 35
|
5,129
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TDecoderLayer
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...activations import ACT2FN
from .configuration_seamless_m4t import SeamlessM4TConfig
from ...utils.deprecation import deprecate_kwarg
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
class SeamlessM4TDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4TConfig, decoder_ffn_dim=None, decoder_attention_heads=None, layer_idx=None):
super().__init__()
decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
decoder_attention_heads = config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4TAttention(embed_dim=self.embed_dim, num_heads=decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.cross_attention = SeamlessM4TAttention(self.embed_dim, decoder_attention_heads, config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=decoder_ffn_dim)
self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
self.ffn_dropout = nn.Dropout(config.activation_dropout)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`):
encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
very large negative values.
past_key_values (`Cache`):
cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.cross_attention_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.cross_attention(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, attention_mask=encoder_attention_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = self.ffn_dropout(hidden_states)
hidden_states = residual + hidden_states
return (hidden_states, self_attn_weights, cross_attn_weights)
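In the same pre-layer-norm style as the encoder layers, the decoder block above chains self-attention, optional cross-attention over the encoder states h_enc, and a feed-forward network, each wrapped in a residual connection (dropouts omitted):
\[
x_1 = x + \mathrm{SelfAttn}(\mathrm{LN}(x)), \qquad
x_2 = x_1 + \mathrm{CrossAttn}(\mathrm{LN}(x_1),\, h_{\mathrm{enc}}), \qquad
y = x_2 + \mathrm{FFN}(\mathrm{LN}(x_2))
\]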
|
class SeamlessM4TDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4TConfig, decoder_ffn_dim=None, decoder_attention_heads=None, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`):
encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
very large negative values.
past_key_values (`Cache`):
cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 4
| 1
| 56
| 8
| 35
| 13
| 4
| 0.35
| 1
| 6
| 3
| 0
| 2
| 11
| 2
| 12
| 113
| 17
| 71
| 30
| 59
| 25
| 42
| 21
| 39
| 5
| 1
| 1
| 8
|
5,130
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoder
|
import math
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
@auto_docstring(custom_intro='\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`SeamlessM4TEncoderLayer`].\n ')
class SeamlessM4TEncoder(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig, embed_tokens: Optional[nn.Embedding]=None, is_t2u_encoder: bool=False):
"""
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
is_t2u_encoder (`bool`, *optional*, defaults to `False`):
indicates if it belongs to the text-to-units model, in which case it won't have input embeddings
"""
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
embed_dim = config.hidden_size
self.is_t2u_encoder = is_t2u_encoder
self.max_source_positions = config.max_position_embeddings
if not self.is_t2u_encoder:
embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = SeamlessM4TScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale)
if embed_tokens is not None:
self.embed_tokens.weight = embed_tokens.weight
self.embed_positions = SeamlessM4TSinusoidalPositionalEmbedding(self.max_source_positions, embed_dim, self.padding_idx)
layers = []
for _ in range(config.encoder_layers):
layers.append(SeamlessM4TEncoderLayer(config, encoder_attention_heads=config.encoder_attention_heads, encoder_ffn_dim=config.encoder_ffn_dim))
self.layers = nn.ModuleList(layers)
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
self.post_init()
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutput]:
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and self.is_t2u_encoder:
raise ValueError('You cannot pass input_ids to the encoder of the text_to_units model. Pass inputs_embeds instead.')
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if not self.is_t2u_encoder:
embed_pos = self.embed_positions(input)
hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device)
else:
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
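The `to_drop` branch above is LayerDrop: during training each encoder layer is skipped independently with probability `encoder_layerdrop`, which regularizes depth without changing inference. A minimal standalone sketch of the same control flow:
import torch
from torch import nn

layerdrop = 0.1
layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(4)])
x = torch.randn(2, 8)
training = True
for layer in layers:
    if training and torch.rand(()) < layerdrop:
        continue                      # skip this layer entirely for this step
    x = layer(x)
print(x.shape)                        # torch.Size([2, 8])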
|
@auto_docstring(custom_intro='\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`SeamlessM4TEncoderLayer`].\n ')
class SeamlessM4TEncoder(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig, embed_tokens: Optional[nn.Embedding]=None, is_t2u_encoder: bool=False):
'''
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
is_t2u_encoder (`bool`, *optional*, defaults to `False`):
indicates if it belongs to the text-to-units model, in which case it won't have input embeddings
'''
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutput]:
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 4
| 2
| 87
| 15
| 57
| 16
| 14
| 0.28
| 1
| 12
| 5
| 0
| 2
| 10
| 2
| 5
| 176
| 30
| 115
| 40
| 98
| 32
| 68
| 26
| 65
| 22
| 2
| 3
| 27
|
5,131
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
import torch
from torch import Tensor, nn
from .configuration_seamless_m4t import SeamlessM4TConfig
class SeamlessM4TEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4TConfig, encoder_ffn_dim=None, encoder_attention_heads=None):
super().__init__()
encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim
encoder_attention_heads = config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4TAttention(embed_dim=self.embed_dim, num_heads=encoder_attention_heads, dropout=config.attention_dropout)
self.attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=encoder_ffn_dim)
self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
self.ffn_dropout = nn.Dropout(config.activation_dropout)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = self.ffn_dropout(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class SeamlessM4TEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4TConfig, encoder_ffn_dim=None, encoder_attention_heads=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
'''
pass
| 3
| 1
| 30
| 5
| 21
| 4
| 3
| 0.19
| 1
| 6
| 3
| 0
| 2
| 7
| 2
| 12
| 61
| 11
| 42
| 18
| 34
| 8
| 27
| 13
| 24
| 3
| 1
| 1
| 5
|
5,132
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TFeedForwardNetwork
|
from .configuration_seamless_m4t import SeamlessM4TConfig
import torch
from torch import Tensor, nn
from ...activations import ACT2FN
class SeamlessM4TFeedForwardNetwork(nn.Module):
def __init__(self, config: SeamlessM4TConfig, ffn_dim: int):
super().__init__()
self.fc1 = nn.Linear(config.hidden_size, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, config.hidden_size)
self.dropout = nn.Dropout(config.activation_dropout)
self.act = ACT2FN[config.activation_function]
def forward(self, hidden_states):
hidden_states = self.fc1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if isinstance(self.fc2.weight, torch.Tensor) and hidden_states.dtype != self.fc2.weight.dtype and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8):
hidden_states = hidden_states.to(self.fc2.weight.dtype)
hidden_states = self.fc2(hidden_states)
return hidden_states
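The guard before `fc2` above casts activations to the weight dtype only when the weights are in a different floating-point format, leaving int8/uint8 (quantized) weights untouched. A small illustration of the situation it handles (bfloat16 chosen only for the example):
import torch
from torch import nn

fc2 = nn.Linear(4, 4).to(torch.bfloat16)   # e.g. a half-precision checkpoint
x = torch.randn(2, 4)                      # float32 activations
if x.dtype != fc2.weight.dtype and fc2.weight.dtype not in (torch.int8, torch.uint8):
    x = x.to(fc2.weight.dtype)
print(fc2(x).dtype)                        # torch.bfloat16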
|
class SeamlessM4TFeedForwardNetwork(nn.Module):
def __init__(self, config: SeamlessM4TConfig, ffn_dim: int):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 9
| 0
| 9
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 4
| 2
| 12
| 20
| 1
| 19
| 7
| 16
| 0
| 15
| 7
| 12
| 2
| 1
| 1
| 3
|
5,133
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The speech-to-speech SeamlessM4T Model transformer which can be used for S2ST.\n ')
class SeamlessM4TForSpeechToSpeech(SeamlessM4TPreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['text_encoder']
main_input_name = 'input_features'
_tied_weights_keys = ['lm_head.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.speech_encoder = SeamlessM4TSpeechEncoder(config)
self.text_decoder = SeamlessM4TDecoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4TCodeHifiGan(config)
def get_encoder(self):
return self.speech_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_decoder.embed_tokens = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_features: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
logger.warning("This is the same forward method as `SeamlessM4TForSpeechToText`. It doesn't use `self.t2u_model`. If you want to generate speech, use the `generate` method.")
encoder_outputs = self.speech_encoder(input_features=input_features, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_outputs[0].device)
encoder_attention_mask = _compute_new_attention_mask(hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths)
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@torch.no_grad()
def generate(self, input_features: Optional[torch.Tensor]=None, return_intermediate_token_ids: Optional[bool]=None, tgt_lang: Optional[str]=None, spkr_id: Optional[int]=0, **kwargs) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
"""
Generates translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_features, num_beams=4, speech_do_sample=True)` will successively perform
beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
spkr_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
text model and speech model respectively, and they take priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4TGenerationOutput, tuple[Tensor]]`:
- If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
- If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
sequence_length)` and `waveform_lengths` which gives the length of each sample.
"""
batch_size = len(input_features) if input_features is not None else len(kwargs.get('inputs_embeds'))
if tgt_lang is None:
raise ValueError('You must specify a `tgt_lang` to generate translated speech.')
else:
tgt_lang = tgt_lang.replace('__', '')
for key in ['text_decoder_lang_to_code_id', 't2u_lang_code_to_id', 'vocoder_lang_code_to_id']:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(f"This model generation config doesn't have a `{key}` key which maps the target language\n to the right token id. Make sure to load the right generation config.")
elif tgt_lang not in lang_code_to_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model.\n Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports\n more languages for text translation than for speech synthesis.")
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text['output_hidden_states'] = True
kwargs_text['return_dict_in_generate'] = True
kwargs_text['output_scores'] = True
text_decoder_input_ids = kwargs_text.get('decoder_input_ids')
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text['decoder_input_ids'] = text_decoder_input_ids
text_generation_output = super().generate(input_features, **kwargs_text)
sequences = text_generation_output.sequences
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get('attention_mask', kwargs_text.get('attention_mask', None))
encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask)[0]
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_hidden_states.device)
attention_mask = _compute_new_attention_mask(hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths)
if num_return_sequences > 1:
idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch + torch.arange(batch_size, device=self.device) * num_return_sequences
sequences = sequences[idx_most_probable_sequences_per_batch]
t2u_input_embeds = self.text_decoder(input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
seq_lens = (sequences != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech['attention_mask'] = t2u_model_attention_mask
t2u_decoder_input_ids = kwargs_speech.get('decoder_input_ids')
t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size, device=self.device)
kwargs_speech['decoder_input_ids'] = t2u_decoder_input_ids
unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
output_unit_ids = unit_ids.detach().clone()
unit_ids = unit_ids[:, kwargs_speech['decoder_input_ids'].shape[1]:]
unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
unit_ids = torch.where(unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
spkr_id = torch.tensor([[spkr_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4TGenerationOutput(waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids)
return (waveform, waveform_lengths)
| null | 12 | 2 | 28 | 4 | 19 | 5 | 3 | 0.26 | 1 | 14 | 7 | 0 | 10 | 6 | 11 | 14 | 333 | 55 | 220 | 80 | 182 | 58 | 117 | 54 | 105 | 16 | 2 | 3 | 36 |
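A minimal usage sketch for the speech-to-speech `generate` above, assuming the public `facebook/hf-seamless-m4t-medium` checkpoint, the `AutoProcessor` front-end, and dummy 16 kHz audio; the checkpoint name, sampling rate, and language code are illustrative assumptions, not taken from the record.

import torch
from transformers import AutoProcessor, SeamlessM4TForSpeechToSpeech

processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TForSpeechToSpeech.from_pretrained("facebook/hf-seamless-m4t-medium")

# One second of dummy 16 kHz audio stands in for a real recording.
audio = torch.randn(1, 16000)
inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")

# `tgt_lang` is mandatory here; `spkr_id` picks one of `config.vocoder_num_spkrs` voices.
out = model.generate(**inputs, tgt_lang="fra", spkr_id=0, return_intermediate_token_ids=True)
print(out.waveform.shape, out.waveform_lengths)                      # translated audio + per-sample lengths
print(processor.decode(out.sequences[0], skip_special_tokens=True))  # intermediate translated text

With `return_intermediate_token_ids` left unset, the same call returns only the `(waveform, waveform_lengths)` tuple.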
5,134 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
from torch.nn import CrossEntropyLoss
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
@auto_docstring(custom_intro='\n The speech-to-text SeamlessM4T Model transformer which can be used for S2TT.\n ')
class SeamlessM4TForSpeechToText(SeamlessM4TPreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['text_encoder', 't2u_model', 'vocoder']
main_input_name = 'input_features'
_tied_weights_keys = ['lm_head.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config: SeamlessM4TConfig):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.speech_encoder = SeamlessM4TSpeechEncoder(config)
self.text_decoder = SeamlessM4TDecoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.speech_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_decoder.embed_tokens = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_features: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.speech_encoder(input_features=input_features, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_outputs[0].device)
encoder_attention_mask = _compute_new_attention_mask(hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths)
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
def generate(self, input_features=None, tgt_lang=None, generation_config=None, logits_processor=None, stopping_criteria=None, prefix_allowed_tokens_fn=None, synced_gpus=False, **kwargs):
"""
Generates sequences of token ids.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
text_decoder_input_ids = kwargs.pop('decoder_input_ids', None)
input_features = input_features if input_features is not None else kwargs.pop('inputs')
if tgt_lang is not None:
inputs = kwargs.get('inputs_embeds') if input_features is None else input_features
inputs = inputs if inputs is not None else kwargs.get('encoder_outputs', {'last_hidden_state': None})['last_hidden_state']
batch_size = len(inputs)
if hasattr(self.generation_config, 'text_decoder_lang_to_code_id'):
tgt_lang = tgt_lang.replace('__', '')
if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in\n {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}")
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
else:
raise ValueError("This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps\n the target language to the right token id. Make sure to load the right generation config.")
else:
logger.warning('You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get\n a correct generation, otherwise the generation will probably make no sense.')
return super().generate(input_features, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, decoder_input_ids=text_decoder_input_ids, **kwargs)
| null | 11 | 2 | 22 | 2 | 15 | 5 | 3 | 0.33 | 1 | 10 | 5 | 0 | 10 | 4 | 11 | 14 | 264 | 30 | 176 | 61 | 136 | 58 | 78 | 33 | 66 | 16 | 2 | 3 | 34 |
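The `generate` override of `SeamlessM4TForSpeechToText` above resolves `tgt_lang` through `generation_config.text_decoder_lang_to_code_id` and forces it as the first decoder token. A minimal usage sketch under the same assumptions as before (checkpoint name, dummy audio, and language code are illustrative):

import torch
from transformers import AutoProcessor, SeamlessM4TForSpeechToText

processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TForSpeechToText.from_pretrained("facebook/hf-seamless-m4t-medium")

audio = torch.randn(1, 16000)  # dummy 16 kHz waveform
inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")

# Omitting tgt_lang only logs a warning and usually yields nonsense, so always
# pass a code listed in generation_config.text_decoder_lang_to_code_id.
tokens = model.generate(**inputs, tgt_lang="eng")
print(processor.batch_decode(tokens, skip_special_tokens=True))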
5,135 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The text-to-speech SeamlessM4T Model transformer which can be used for T2ST.\n ')
class SeamlessM4TForTextToSpeech(SeamlessM4TPreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['speech_encoder']
main_input_name = 'input_ids'
_tied_weights_keys = ['lm_head.weight', 'text_encoder.embed_tokens.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config: SeamlessM4TConfig):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4TEncoder(config, self.shared)
self.text_decoder = SeamlessM4TDecoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4TCodeHifiGan(config)
def get_encoder(self):
return self.text_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
logger.warning("This is the same forward method as `SeamlessM4TForTextToText`.It doesn't use the text-to-unit model `SeamlessM4TTextToUnitForConditionalGeneration`.If you want to generate speech, use the `.generate` method.")
encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@torch.no_grad()
def generate(self, input_ids: Optional[torch.Tensor]=None, return_intermediate_token_ids: Optional[bool]=None, tgt_lang: Optional[str]=None, spkr_id: Optional[int]=0, **kwargs) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
"""
Generates translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform
beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
spkr_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
text model and speech model respectively, and they take priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4TGenerationOutput, tuple[Tensor]]`:
- If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
- If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
sequence_length)` and `waveform_lengths` which gives the length of each sample.
"""
batch_size = len(input_ids) if input_ids is not None else len(kwargs.get('inputs_embeds'))
if tgt_lang is None:
raise ValueError('You must specify a `tgt_lang` to generate translated speech.')
else:
tgt_lang = tgt_lang.replace('__', '')
for key in ['text_decoder_lang_to_code_id', 't2u_lang_code_to_id', 'vocoder_lang_code_to_id']:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(f"This model generation config doesn't have a `{key}` key which maps the target language\n to the right token id. Make sure to load the right generation config.")
elif tgt_lang not in lang_code_to_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model.\n Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports\n more languages for text translation than for speech synthesis.")
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text['output_hidden_states'] = True
kwargs_text['return_dict_in_generate'] = True
kwargs_text['output_scores'] = True
text_decoder_input_ids = kwargs_text.get('decoder_input_ids')
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text['decoder_input_ids'] = text_decoder_input_ids
text_generation_output = super().generate(input_ids, **kwargs_text)
sequences = text_generation_output.sequences
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get('attention_mask', kwargs_text.get('attention_mask', None))
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
if num_return_sequences > 1:
idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch + torch.arange(batch_size, device=self.device) * num_return_sequences
sequences = sequences[idx_most_probable_sequences_per_batch]
t2u_input_embeds = self.text_decoder(input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
seq_lens = (sequences != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech['attention_mask'] = t2u_model_attention_mask
t2u_decoder_input_ids = kwargs_speech.get('decoder_input_ids')
t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size, device=self.device)
kwargs_speech['decoder_input_ids'] = t2u_decoder_input_ids
unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
output_unit_ids = unit_ids.detach().clone()
unit_ids = unit_ids[:, kwargs_speech['decoder_input_ids'].shape[1]:]
unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
unit_ids = torch.where(unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
spkr_id = torch.tensor([[spkr_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4TGenerationOutput(waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids)
return (waveform, waveform_lengths)
| null | 12 | 2 | 27 | 4 | 18 | 5 | 3 | 0.28 | 1 | 15 | 8 | 0 | 10 | 6 | 11 | 14 | 323 | 55 | 210 | 77 | 173 | 58 | 114 | 52 | 102 | 15 | 2 | 3 | 34 |
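The docstring above describes two-level kwargs routing: un-prefixed keyword arguments reach both sub-generations, while `text_*` and `speech_*` prefixes target the text decoder and the text-to-unit model respectively (split by `format_speech_generation_kwargs`). A hedged sketch of that routing; the checkpoint name and language codes are assumptions:

from transformers import AutoProcessor, SeamlessM4TForTextToSpeech

processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TForTextToSpeech.from_pretrained("facebook/hf-seamless-m4t-medium")

inputs = processor(text="Hello, my dog is cute", src_lang="eng", return_tensors="pt")

# Beam-search the text pass, sample the unit pass, in the spirit of the
# `.generate(input_ids, num_beams=4, speech_do_sample=True)` example above.
waveform, waveform_lengths = model.generate(
    **inputs,
    tgt_lang="spa",
    text_num_beams=4,       # consumed only by the text generation
    speech_do_sample=True,  # consumed only by the text-to-unit generation
)
print(waveform.shape, waveform_lengths)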
5,136 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToText |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
from torch.nn import CrossEntropyLoss
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
@auto_docstring(custom_intro='\n The text-to-text SeamlessM4T Model transformer which can be used for T2TT.\n ')
class SeamlessM4TForTextToText(SeamlessM4TPreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['speech_encoder', 't2u_model', 'vocoder']
main_input_name = 'input_ids'
_tied_weights_keys = ['lm_head.weight', 'text_encoder.embed_tokens.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config: SeamlessM4TConfig):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4TEncoder(config, self.shared)
self.text_decoder = SeamlessM4TDecoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.text_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
def generate(self, input_ids=None, tgt_lang=None, generation_config=None, logits_processor=None, stopping_criteria=None, prefix_allowed_tokens_fn=None, synced_gpus=False, **kwargs):
"""
Generates sequences of token ids.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.Tensor` of varying shape depending on the modality, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
text_decoder_input_ids = kwargs.pop('decoder_input_ids', None)
if tgt_lang is not None:
batch_size = len(input_ids) if input_ids is not None else len(kwargs.get('inputs_embeds'))
if hasattr(self.generation_config, 'text_decoder_lang_to_code_id'):
tgt_lang = tgt_lang.replace('__', '')
if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in\n {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}")
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
else:
raise ValueError("This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps\n the target language to the right token id. Make sure to load the right generation config.")
else:
logger.warning('You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get\n a correct generation, otherwise the generation will probably make no sense.')
return super().generate(input_ids, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, decoder_input_ids=text_decoder_input_ids, **kwargs)
| null | 11 | 2 | 22 | 2 | 14 | 6 | 3 | 0.37 | 2 | 10 | 5 | 0 | 10 | 4 | 11 | 14 | 260 | 33 | 166 | 59 | 126 | 61 | 75 | 31 | 63 | 15 | 2 | 3 | 31 |
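For completeness, a minimal text-to-text (T2TT) sketch for the class above; the checkpoint name and language codes are illustrative assumptions:

from transformers import AutoProcessor, SeamlessM4TForTextToText

processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TForTextToText.from_pretrained("facebook/hf-seamless-m4t-medium")

inputs = processor(text="Hello, my dog is cute", src_lang="eng", return_tensors="pt")

# tgt_lang is looked up in generation_config.text_decoder_lang_to_code_id and
# forced as decoder_input_ids, mirroring the generate() override above.
tokens = model.generate(**inputs, tgt_lang="fra")
print(processor.batch_decode(tokens, skip_special_tokens=True))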
5,137 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TGenerationOutput |
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Class defining the generated outputs from [`SeamlessM4TModel`], [`SeamlessM4TForTextToText`],\n [`SeamlessM4TForTextToSpeech`], [`SeamlessM4TForSpeechToSpeech`] and [`SeamlessM4TForSpeechToText`].\n ')
class SeamlessM4TGenerationOutput(ModelOutput):
"""
waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
The final audio waveform predicted by the model.
waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
The length in samples of each element in the `waveform` batch.
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
early due to the `eos_token_id`.
unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
The generated translated unit sequences. This is the output of the text-to-units model. The second
dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
early due to the `t2u_eos_token_id`.
"""
waveform: Optional[torch.FloatTensor] = None
waveform_lengths: Optional[torch.IntTensor] = None
sequences: Optional[tuple[torch.FloatTensor]] = None
unit_sequences: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class defining the generated outputs from [`SeamlessM4TModel`], [`SeamlessM4TForTextToText`],\n [`SeamlessM4TForTextToSpeech`], [`SeamlessM4TForSpeechToSpeech`] and [`SeamlessM4TForSpeechToText`].\n ')
class SeamlessM4TGenerationOutput(ModelOutput):
'''
waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
The final audio waveform predicted by the model.
waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
The length in samples of each element in the `waveform` batch.
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
early due to the `eos_token_id`.
unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
The generated translated unit sequences. This is the output of the text-to-units model. The second
dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
early due to the `t2u_eos_token_id`.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 2 | 5 | 5 | 4 | 17 | 5 | 5 | 4 | 0 | 1 | 0 | 0 |
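Since `waveform` is padded to the longest clip in the batch, `waveform_lengths` is what recovers the individual clips. A small, self-contained sketch of that unpacking; the `_Dummy` stand-in only mimics the output's shape and is not part of the record:

import torch

def split_waveforms(out):
    # waveform: (batch_size, max_num_samples); waveform_lengths: (batch_size,)
    return [wav[: int(length)] for wav, length in zip(out.waveform, out.waveform_lengths)]

class _Dummy:  # shapes mimic a real SeamlessM4TGenerationOutput
    waveform = torch.randn(2, 32000)
    waveform_lengths = torch.tensor([32000, 24000])

clips = split_waveforms(_Dummy())
print([tuple(c.shape) for c in clips])  # [(32000,), (24000,)]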
5,138 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4THifiGan |
import torch
from torch import Tensor, nn
from .configuration_seamless_m4t import SeamlessM4TConfig
class SeamlessM4THifiGan(nn.Module):
def __init__(self, config: SeamlessM4TConfig):
super().__init__()
model_in_dim = config.unit_embed_dim + config.lang_embed_dim + config.spkr_embed_dim
self.leaky_relu_slope = config.leaky_relu_slope
self.num_kernels = len(config.resblock_kernel_sizes)
self.num_upsamples = len(config.upsample_rates)
self.conv_pre = nn.Conv1d(model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3)
self.upsampler = nn.ModuleList()
for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
self.upsampler.append(nn.ConvTranspose1d(config.upsample_initial_channel // 2 ** i, config.upsample_initial_channel // 2 ** (i + 1), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2))
self.resblocks = nn.ModuleList()
for i in range(len(self.upsampler)):
channels = config.upsample_initial_channel // 2 ** (i + 1)
for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
"""
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
waveform.
Args:
input_embeds (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim`
is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
"""
hidden_states = self.conv_pre(input_embeds)
for i in range(self.num_upsamples):
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = self.upsampler[i](hidden_states)
res_state = self.resblocks[i * self.num_kernels](hidden_states)
for j in range(1, self.num_kernels):
res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
hidden_states = res_state / self.num_kernels
hidden_states = nn.functional.leaky_relu(hidden_states)
hidden_states = self.conv_post(hidden_states)
hidden_states = torch.tanh(hidden_states)
waveform = hidden_states.squeeze(1)
return waveform
|
class SeamlessM4THifiGan(nn.Module):
def __init__(self, config: SeamlessM4TConfig):
pass
def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
'''
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
waveform.
Args:
input_embeds (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim`
is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
'''
pass
| 3 | 1 | 34 | 5 | 22 | 7 | 4 | 0.31 | 1 | 6 | 2 | 0 | 2 | 7 | 2 | 12 | 70 | 11 | 45 | 19 | 42 | 14 | 31 | 19 | 28 | 4 | 1 | 2 | 7 |
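In the `forward` above, each `ConvTranspose1d` with `stride=r` and `padding=(kernel_size - r) // 2` multiplies the time axis by exactly `r`, so the waveform ends up with `seq_len * prod(upsample_rates)` samples. A standalone sketch of that arithmetic; the rates and channel count below are illustrative assumptions, not read from a real config:

import math
import torch
from torch import nn

upsample_rates = [5, 4, 4, 2, 2]          # assumed; product = 320
upsample_kernel_sizes = [11, 8, 8, 4, 4]  # assumed; (k - r) is even for each pair
channels = 512                            # stands in for config.upsample_initial_channel

layers = []
in_ch = channels
for i, (r, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
    out_ch = channels // 2 ** (i + 1)  # halve channels at every stage, as in __init__ above
    layers.append(nn.ConvTranspose1d(in_ch, out_ch, kernel_size=k, stride=r, padding=(k - r) // 2))
    in_ch = out_ch

x = torch.randn(1, channels, 50)  # (batch, channels, frames): channel-first, like the conv_pre output
for layer in layers:
    x = layer(x)
print(x.shape[-1], 50 * math.prod(upsample_rates))  # both print 16000

With L_out = (L_in - 1) * stride - 2 * padding + kernel_size, the chosen padding cancels everything but the stride factor, which is why each stage scales the sequence length by exactly its upsample rate.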
5,139 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The original SeamlessM4T Model transformer which can be used for every task available (S2ST, S2TT, T2TT, T2ST).\n ')
class SeamlessM4TModel(SeamlessM4TPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight', 'text_encoder.embed_tokens.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config, current_modality='text'):
"""
current_modality (`str`, *optional*, defaults to `"text"`):
Default modality. Used to initialize the model.
"""
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4TEncoder(config, self.shared)
self.speech_encoder = SeamlessM4TSpeechEncoder(config)
self.text_decoder = SeamlessM4TDecoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
self.current_modality = current_modality
if current_modality == 'speech':
self.main_input_name = 'input_features'
self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4TCodeHifiGan(config)
def set_modality(self, modality='text'):
if modality == 'text':
self.main_input_name = 'input_ids'
self.current_modality = 'text'
elif modality == 'speech':
self.main_input_name = 'input_features'
self.current_modality = 'speech'
else:
raise ValueError(f'`modality={modality}` is not a valid modality. It must be `text` or `speech`.')
def get_encoder(self):
if self.current_modality == 'text':
return self.text_encoder
else:
return self.speech_encoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, input_features: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
if input_ids is None and input_features is None and (inputs_embeds is None) and (encoder_outputs is None):
raise ValueError('`input_ids`, `input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not.')
elif input_features is not None:
if input_ids is not None:
logger.warning('`input_ids` is not `None` but `input_features` has been given. `input_features` will be used in priority through the `speech_encoder`. Make sure that `input_features` and `input_ids` are mutually exclusive.')
if inputs_embeds is not None:
logger.warning('`inputs_embeds` is not `None` but `input_features` has been given. `input_features` will be used in priority through `speech_encoder`. `inputs_embeds` will be ignored.')
logger.warning('This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText` depending on the input modality. If you want to generate speech, use the `generate` method.')
self.set_modality('speech')
encoder_outputs = self.speech_encoder(input_features=input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif input_ids is not None or inputs_embeds is not None:
logger.warning('This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText` depending on the input modality. If you want to generate speech, use the `generate` method.')
self.set_modality('text')
encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
if self.current_modality == 'speech' and attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_outputs[0].device)
encoder_attention_mask = _compute_new_attention_mask(hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths)
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@torch.no_grad()
def generate(self, input_ids: Optional[torch.Tensor]=None, input_features: Optional[torch.Tensor]=None, return_intermediate_token_ids: Optional[bool]=None, tgt_lang: Optional[str]=None, spkr_id: Optional[int]=0, generate_speech: Optional[bool]=True, **kwargs) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
"""
Generates translated token ids and/or translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively
perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
Note that if `generate_speech=False`, this parameter will be ignored and
the text tokens are returned.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
spkr_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
generate_speech (`bool`, *optional*, defaults to `True`):
If `False`, will only return the text tokens and won't generate speech.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4TGenerationOutput, tuple[Tensor], ModelOutput]`:
- If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
- If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample.
- If `generate_speech=False`, it will return `ModelOutput`.
"""
if input_ids is None and input_features is None and (kwargs.get('inputs_embeds') is None):
raise ValueError('`input_ids`,`input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not.')
if generate_speech and tgt_lang is None:
raise ValueError('You must specify a `tgt_lang` to generate translated speech.')
if tgt_lang is not None:
tgt_lang = tgt_lang.replace('__', '')
for key in ['text_decoder_lang_to_code_id', 't2u_lang_code_to_id', 'vocoder_lang_code_to_id']:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(f"This model generation config doesn't have a `{key}` key which maps the target language\n to the right token id. Make sure to load the right generation config.")
elif tgt_lang not in lang_code_to_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model.\n Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports\n more languages for text translation than for speech synthesis.")
batch_size = len(input_features) if input_features is not None else len(input_ids) if input_ids is not None else len(kwargs.get('inputs_embeds'))
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text['output_hidden_states'] = True
kwargs_text['return_dict_in_generate'] = True
kwargs_text['output_scores'] = True
text_decoder_input_ids = kwargs_text.get('decoder_input_ids')
if tgt_lang is not None:
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text['decoder_input_ids'] = text_decoder_input_ids
if input_features is not None:
self.set_modality('speech')
if input_ids is not None:
logger.warning('`input_features` and `input_ids` are both non empty. `input_features` will be used in priority through the speech encoder. Make sure `input_features=None` if you want to use the text encoder.')
text_generation_output = super().generate(input_features=input_features, **kwargs_text)
else:
self.set_modality('text')
text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text)
sequences = text_generation_output.sequences
if not generate_speech:
return text_generation_output
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get('attention_mask', kwargs_text.get('attention_mask', None))
if self.current_modality == 'speech':
encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask).last_hidden_state
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_hidden_states.device)
attention_mask = _compute_new_attention_mask(hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths)
else:
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
if num_return_sequences > 1:
idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch + torch.arange(batch_size, device=self.device) * num_return_sequences
sequences = sequences[idx_most_probable_sequences_per_batch]
t2u_input_embeds = self.text_decoder(input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
seq_lens = (sequences != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech['attention_mask'] = t2u_model_attention_mask
t2u_decoder_input_ids = kwargs_speech.get('decoder_input_ids')
t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size, device=self.device)
kwargs_speech['decoder_input_ids'] = t2u_decoder_input_ids
unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
output_unit_ids = unit_ids.detach().clone()
unit_ids = unit_ids[:, kwargs_speech['decoder_input_ids'].shape[1]:]
unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
unit_ids = torch.where(unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
spkr_id = torch.tensor([[spkr_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4TGenerationOutput(waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids)
return (waveform, waveform_lengths)
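# A minimal usage sketch (not from the original source) of the prefixed-kwargs convention
# documented in the `generate` docstring above: un-prefixed kwargs reach both sub-models,
# while `text_*` / `speech_*` kwargs reach only the text or the speech generation step.
# The checkpoint name and the assumption that this method lives on the full multimodal model
# (e.g. `SeamlessM4TModel`) are illustrative only.
from transformers import AutoProcessor, SeamlessM4TModel
processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")
text_inputs = processor(text="UN Chief says there is no military solution in Syria", src_lang="eng", return_tensors="pt")
waveform, waveform_lengths = model.generate(
    **text_inputs,
    tgt_lang="fra",         # required whenever generate_speech=True
    num_beams=4,            # no prefix: applies to both the text and the speech sub-model
    speech_do_sample=True,  # `speech_` prefix: sampling only in the text-to-unit step
)
# With return_intermediate_token_ids=True the same call would instead return a
# SeamlessM4TGenerationOutput that also carries the translated text token ids.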
| null
| 12
| 3
| 37
| 5
| 26
| 7
| 5
| 0.24
| 1
| 15
| 8
| 0
| 10
| 9
| 11
| 14
| 432
| 65
| 295
| 84
| 254
| 72
| 152
| 55
| 140
| 20
| 2
| 3
| 52
|
5,140
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TPreTrainedModel
|
import math
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_utils import PreTrainedModel
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
@auto_docstring
class SeamlessM4TPreTrainedModel(PreTrainedModel):
config: SeamlessM4TConfig
base_model_prefix = 'seamless_m4t'
supports_gradient_checkpointing = True
_no_split_modules = ['SeamlessM4TEncoderLayer', 'SeamlessM4TDecoderLayer', 'SeamlessM4TConformerEncoderLayer']
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, SeamlessM4TConformerSelfAttention):
if hasattr(module, 'pos_bias_u'):
nn.init.xavier_uniform_(module.pos_bias_u)
if hasattr(module, 'pos_bias_v'):
nn.init.xavier_uniform_(module.pos_bias_v)
elif isinstance(module, SeamlessM4TConformerPositionalConvEmbedding):
nn.init.normal_(module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)))
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, SeamlessM4TConformerFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
kernel_size, stride = (self.config.adaptor_kernel_size, self.config.adaptor_stride)
pad = kernel_size // 2
seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
seq_lens = (seq_lens + 2 * pad - kernel_size) / stride + 1
return seq_lens.floor()
def compute_last_hidden_states_per_sample(self, hidden_states: tuple[tuple[torch.Tensor]], beam_indices: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Computes the last hidden states.
Parameters:
hidden_states (`tuple[tuple[torch.Tensor]]`):
The generated hidden states. Tuple (one element for each generated token) of tuples (one element for
each layer of the decoder) of torch.FloatTensor of shape (batch_size*num_beams*num_return_sequences,
generated_length, hidden_size).
beam_indices (`torch.LongTensor`, *optional*):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at
generate-time.
Return:
`torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`
containing
the last hidden states.
"""
last_hidden_states = torch.concat([hidden_states[-1] for hidden_states in hidden_states], dim=1)
if beam_indices is None:
return last_hidden_states
beam_indices_mask = beam_indices < 0
max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
beam_indices = beam_indices.clone()[:, :max_beam_length]
beam_indices_mask = beam_indices_mask[:, :max_beam_length]
beam_indices[beam_indices_mask] = 0
beam_indices = beam_indices.unsqueeze(-1)
beam_indices = beam_indices.expand(-1, -1, last_hidden_states.shape[-1])
last_hidden_states = torch.gather(last_hidden_states, 0, beam_indices)
return last_hidden_states
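# A minimal sketch of the adapter length arithmetic performed by
# _compute_sub_sample_lengths_from_attention_mask above. The kernel/stride values below are
# assumptions standing in for config.adaptor_kernel_size and config.adaptor_stride.
import torch
def _sub_sample_lengths(attention_mask, kernel_size=8, stride=8):
    pad = kernel_size // 2
    seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
    return ((seq_lens + 2 * pad - kernel_size) / stride + 1).floor()
mask = torch.ones(2, 160, dtype=torch.long)
mask[1, 100:] = 0                  # second sample holds only 100 valid frames
print(_sub_sample_lengths(mask))   # tensor([21., 13.]) with the assumed kernel=stride=8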
|
@auto_docstring
class SeamlessM4TPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pass
def compute_last_hidden_states_per_sample(self, hidden_states: tuple[tuple[torch.Tensor]], beam_indices: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Computes the last hidden states.
Parameters:
hidden_states (`tuple[tuple[torch.Tensor]]`):
The generated hidden states. Tuple (one element for each generated token) of tuples (one element for
each layer of the decoder) of torch.FloatTensor of shape (batch_size*num_beams*num_return_sequences,
generated_length, hidden_size).
beam_indices (`torch.LongTensor`, *optional*):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at
generate-time.
Return:
`torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`
containing
the last hidden states.
'''
pass
| 5
| 2
| 32
| 3
| 19
| 9
| 5
| 0.52
| 1
| 4
| 3
| 10
| 3
| 0
| 3
| 3
| 108
| 14
| 62
| 20
| 54
| 32
| 48
| 16
| 44
| 13
| 1
| 2
| 16
|
5,141
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TScaledWordEmbedding
|
import torch
from typing import Optional, Union
from torch import Tensor, nn
class SeamlessM4TScaledWordEmbedding(nn.Embedding):
"""
This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
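# A minimal sketch: the scaled embedding is a plain nn.Embedding whose output is multiplied by
# embed_scale. The sqrt(embedding_dim) value below is an assumption for illustration; the real
# scale comes from the model config.
import math
import torch
emb = SeamlessM4TScaledWordEmbedding(num_embeddings=10, embedding_dim=4, padding_idx=0, embed_scale=math.sqrt(4))
out = emb(torch.tensor([[1, 2, 3]]))   # shape (1, 3, 4); equal to the raw embedding lookup times 2.0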
|
class SeamlessM4TScaledWordEmbedding(nn.Embedding):
'''
This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
'''
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
pass
def forward(self, input_ids: torch.Tensor):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.5
| 1
| 4
| 0
| 0
| 2
| 1
| 2
| 2
| 11
| 2
| 6
| 4
| 3
| 3
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
5,142
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TSinusoidalPositionalEmbedding
|
import math
import torch
from typing import Optional, Union
from torch import Tensor, nn
class SeamlessM4TSinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, 'weights'):
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer('weights', emb_weights, persistent=False)
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0):
if input_ids is not None:
bsz, seq_len = input_ids.size()
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, self.padding_idx)
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
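# A minimal sketch checking the shape and padding behaviour of the sinusoidal table above.
import torch
table = SeamlessM4TSinusoidalPositionalEmbedding.get_embedding(num_embeddings=16, embedding_dim=8, padding_idx=1)
assert table.shape == (16, 8) and table[1].abs().sum() == 0   # the padding row is zeroed
module = SeamlessM4TSinusoidalPositionalEmbedding(num_positions=16, embedding_dim=8, padding_idx=1)
positions = module(input_ids=torch.tensor([[5, 6, 1, 1]]))    # ids equal to padding_idx keep the padding position
assert positions.shape == (1, 4, 8)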
|
class SeamlessM4TSinusoidalPositionalEmbedding(nn.Module):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
pass
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
pass
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
'''
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
'''
pass
@torch.no_grad()
def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0):
pass
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
'''
pass
| 11
| 4
| 13
| 2
| 9
| 3
| 2
| 0.34
| 1
| 3
| 0
| 0
| 4
| 3
| 5
| 15
| 76
| 13
| 47
| 22
| 37
| 16
| 38
| 18
| 32
| 3
| 1
| 1
| 10
|
5,143
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TSpeechEncoder
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
@auto_docstring(custom_intro='\n Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers.\n Each layer is a [`SeamlessM4TConformerEncoderLayer`].\n ')
class SeamlessM4TSpeechEncoder(SeamlessM4TPreTrainedModel):
main_input_name = 'input_features'
def __init__(self, config: SeamlessM4TConfig):
super().__init__(config)
self.feature_projection = SeamlessM4TConformerFeatureProjection(config)
self.encoder = SeamlessM4TConformerEncoder(config)
self.intermediate_ffn = SeamlessM4TConformerFeedForward(config, act_fn='relu', dropout=0.0)
self.adapter = SeamlessM4TConformerAdapter(config) if config.add_adapter else None
self.inner_layer_norm = nn.LayerNorm(config.hidden_size)
self.post_init()
def forward(self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Wav2Vec2BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_features is None:
raise ValueError('Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4TSpeechEncoder.forward`.\n Make sure one of them is not `None`.')
hidden_states = self.feature_projection(input_features)
encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = encoder_outputs[0]
expanded_hidden_states = self.intermediate_ffn(hidden_states)
hidden_states = hidden_states + 0.5 * expanded_hidden_states
if self.adapter is not None:
hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)
hidden_states = self.inner_layer_norm(hidden_states)
if not return_dict:
return (hidden_states,) + encoder_outputs[1:]
return Wav2Vec2BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
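# A minimal sketch of a forward pass through the speech encoder. Loading the public checkpoint
# (name reused from the tokenizer example further down in this file) and feeding one second of
# random 16 kHz audio are assumptions for illustration only.
import numpy as np
from transformers import AutoProcessor, SeamlessM4TModel
processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")
audio_inputs = processor(audio=np.random.randn(16000).astype(np.float32), sampling_rate=16000, return_tensors="pt")
encoder_out = model.speech_encoder(**audio_inputs)
encoder_out.last_hidden_state.shape   # (1, num_adapter_frames, hidden_size)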
|
@auto_docstring(custom_intro='\n Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers.\n Each layer is a [`SeamlessM4TConformerEncoderLayer`].\n ')
class SeamlessM4TSpeechEncoder(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig):
pass
def forward(self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Wav2Vec2BaseModelOutput]:
pass
| 4
| 0
| 30
| 6
| 24
| 1
| 5
| 0.02
| 1
| 10
| 6
| 0
| 2
| 5
| 2
| 5
| 64
| 13
| 50
| 20
| 39
| 1
| 27
| 12
| 24
| 7
| 2
| 1
| 9
|
5,144
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
import copy
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4TTextToUnit`].\n ')
class SeamlessM4TTextToUnitForConditionalGeneration(SeamlessM4TPreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['vocoder', 'speech_encoder', 'text_encoder', 'text_decoder']
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: SeamlessM4TConfig, embed_tokens_decoder: Optional[nn.Embedding]=None):
"""
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
"""
config = copy.deepcopy(config)
for param, val in config.to_dict().items():
if param.startswith('t2u_'):
config.__setattr__(param[4:], val)
super().__init__(config)
self.model = SeamlessM4TTextToUnitModel(config, embed_tokens_decoder)
self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id)
outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
lm_logits = self.lm_head(outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id)
def _tie_weights(self) -> None:
if getattr(self.config, 'tie_word_embeddings', True):
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
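# A minimal sketch of the label shifting used above when only `labels` is supplied.
# shift_tokens_right itself is defined elsewhere in this module; the behaviour below (prepend
# the decoder start id, drop the last position, map -100 back to the pad id) is the usual
# convention and is reproduced here as an assumption, not as a verbatim copy of that helper.
import torch
def _shift_right(labels, pad_token_id, decoder_start_token_id):
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted
labels = torch.tensor([[15, 16, 17, -100]])
_shift_right(labels, pad_token_id=0, decoder_start_token_id=2)   # tensor([[ 2, 15, 16, 17]])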
|
@auto_docstring(custom_intro='\n Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4TTextToUnit`].\n ')
class SeamlessM4TTextToUnitForConditionalGeneration(SeamlessM4TPreTrainedModel, GenerationMixin):
def __init__(self, config: SeamlessM4TConfig, embed_tokens_decoder: Optional[nn.Embedding]=None):
'''
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
'''
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
'''
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
def _tie_weights(self) -> None:
pass
| 11
| 2
| 10
| 1
| 9
| 0
| 2
| 0.03
| 2
| 7
| 3
| 0
| 10
| 2
| 11
| 14
| 130
| 19
| 108
| 46
| 75
| 3
| 55
| 25
| 43
| 8
| 2
| 2
| 23
|
5,145
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitModel
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t import SeamlessM4TConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
@auto_docstring(custom_intro='\n Transformer bare text-to-unit encoder-decoder. The encoder is a [`SeamlessM4TEncoder`] without embeddings and the decoder is a [`SeamlessM4TDecoder`].\n ')
class SeamlessM4TTextToUnitModel(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig, embed_tokens_decoder: Optional[nn.Embedding]=None):
"""
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
"""
super().__init__(config)
self.encoder = SeamlessM4TEncoder(config, is_t2u_encoder=True)
self.decoder = SeamlessM4TDecoder(config, embed_tokens_decoder)
self.post_init()
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
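# Descriptive note based on the code above and on the generation path earlier in this file:
# the encoder here is built with is_t2u_encoder=True, so it carries no token embeddings and is
# normally driven through `inputs_embeds` (the text decoder's last hidden states) rather than
# `input_ids` when producing unit tokens.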
|
@auto_docstring(custom_intro='\n Transformer bare text-to-unit encoder-decoder. The encoder is a [`SeamlessM4TEncoder`] without embeddings and the decoder is a [`SeamlessM4TDecoder`].\n ')
class SeamlessM4TTextToUnitModel(SeamlessM4TPreTrainedModel):
def __init__(self, config: SeamlessM4TConfig, embed_tokens_decoder: Optional[nn.Embedding]=None):
'''
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
'''
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
pass
| 4
| 1
| 39
| 3
| 35
| 2
| 6
| 0.04
| 1
| 8
| 5
| 0
| 2
| 2
| 2
| 5
| 80
| 7
| 70
| 24
| 49
| 3
| 18
| 6
| 15
| 10
| 2
| 1
| 11
|
5,146
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TVariancePredictor
|
from torch import Tensor, nn
class SeamlessM4TVariancePredictor(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.unit_embed_dim
kernel_size = config.variance_predictor_kernel_size
var_pred_dropout = config.var_pred_dropout
self.conv1 = nn.Conv1d(embed_dim, embed_dim, kernel_size=kernel_size, padding=(kernel_size - 1) // 2)
self.activation_function = nn.ReLU()
self.ln1 = nn.LayerNorm(embed_dim)
self.dropout_module = nn.Dropout(p=var_pred_dropout)
self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=kernel_size, padding=1)
self.ln2 = nn.LayerNorm(embed_dim)
self.proj = nn.Linear(embed_dim, 1)
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.conv1(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln1(hidden_states))
hidden_states = self.conv2(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln2(hidden_states))
return self.proj(hidden_states).squeeze(dim=2)
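# A minimal shape check for the variance predictor; the tiny config object below is a
# stand-in (assumption) for the real SeamlessM4TConfig fields it reads.
import torch
class _TinyCfg:
    unit_embed_dim = 16
    variance_predictor_kernel_size = 3
    var_pred_dropout = 0.0
predictor = SeamlessM4TVariancePredictor(_TinyCfg())
out = predictor(torch.randn(2, 10, 16))   # one scalar prediction per position
assert out.shape == (2, 10)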
|
class SeamlessM4TVariancePredictor(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: Tensor) -> Tensor:
pass
| 3
| 0
| 17
| 1
| 15
| 1
| 1
| 0.03
| 1
| 2
| 0
| 0
| 2
| 7
| 2
| 12
| 35
| 3
| 31
| 13
| 28
| 1
| 21
| 13
| 18
| 1
| 1
| 0
| 2
|
5,147
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/processing_seamless_m4t.py
|
transformers.models.seamless_m4t.processing_seamless_m4t.SeamlessM4TProcessor
|
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...audio_utils import AudioInput
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
class SeamlessM4TProcessor(ProcessorMixin):
"""
Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a
single processor.
[`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and
[`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for
more information.
Args:
feature_extractor ([`SeamlessM4TFeatureExtractor`]):
The audio processor is a required input.
tokenizer ([`SeamlessM4TTokenizerFast`]):
The tokenizer is a required input.
"""
feature_extractor_class = 'SeamlessM4TFeatureExtractor'
tokenizer_class = ('SeamlessM4TTokenizer', 'SeamlessM4TTokenizerFast')
valid_processor_kwargs = SeamlessM4TProcessorKwargs
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
@deprecate_kwarg('audios', version='v4.59.0', new_name='audio')
def __call__(self, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audios: Optional[AudioInput]=None, audio: Optional[AudioInput]=None, **kwargs: Unpack[ProcessingKwargs]):
"""
Main method to prepare for the model one or several sequence(s) and audio(s). This method forwards the `text`
and `kwargs` arguments to SeamlessM4TTokenizerFast's [`~SeamlessM4TTokenizerFast.__call__`] if `text` is not
`None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audios` is not `None`. Please refer
to the docstring of the above two methods for more information.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
audios (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case
of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels,
and T the sample length of the audio.
Returns:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **input_features** -- Audio input features to be fed to a model. Returned when `audios` is not `None`.
"""
if text is not None and audios is not None:
raise ValueError('Text and audios are mutually exclusive when passed to `SeamlessM4T`. Specify one or another.')
if audio is None and audios is not None:
logger.warning('Passing `audios` as keyword argument is deprecated and will be removed in v4.63, please pass `audio` instead.')
audio = audios
return super().__call__(text=text, audio=audio, **kwargs)
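# A minimal usage sketch of the processor; the checkpoint name is reused from the tokenizer
# example further down in this file and is otherwise an assumption.
import numpy as np
from transformers import AutoProcessor
processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
text_inputs = processor(text="Hello!", src_lang="eng", return_tensors="pt")
# -> input_ids and attention_mask from the tokenizer
audio_inputs = processor(audio=np.random.randn(16000).astype(np.float32), sampling_rate=16000, return_tensors="pt")
# -> input_features (and attention_mask) from the feature extractor; text and audio are mutually exclusive here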
|
class SeamlessM4TProcessor(ProcessorMixin):
'''
Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a
single processor.
[`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and
[`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for
more information.
Args:
feature_extractor ([`SeamlessM4TFeatureExtractor`]):
The audio processor is a required input.
tokenizer ([`SeamlessM4TTokenizerFast`]):
The tokenizer is a required input.
'''
def __init__(self, feature_extractor, tokenizer):
pass
@deprecate_kwarg('audios', version='v4.59.0', new_name='audio')
def __call__(self, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audios: Optional[AudioInput]=None, audio: Optional[AudioInput]=None, **kwargs: Unpack[ProcessingKwargs]):
'''
Main method to prepare for the model one or several sequence(s) and audio(s). This method forwards the `text`
and `kwargs` arguments to SeamlessM4TTokenizerFast's [`~SeamlessM4TTokenizerFast.__call__`] if `text` is not
`None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audios` is not `None`. Please refer
to the docstring of the above two methods for more information.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
audios (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case
of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels,
and T the sample length of the audio.
Returns:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **input_features** -- Audio input features to be fed to a model. Returned when `audios` is not `None`.
'''
pass
| 4
| 2
| 14
| 1
| 6
| 8
| 2
| 1.59
| 1
| 4
| 0
| 0
| 5
| 0
| 5
| 22
| 96
| 13
| 32
| 13
| 25
| 51
| 26
| 12
| 20
| 6
| 2
| 2
| 10
|
5,148
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py
|
transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer
|
from ...tokenization_utils import BatchEncoding, PreTokenizedInput, PreTrainedTokenizer, TextInput
from shutil import copyfile
from ...utils import PaddingStrategy, logging
import os
from ...convert_slow_tokenizer import import_protobuf
from ...tokenization_utils_base import AddedToken
from typing import Any, Optional, Union
from ...utils.import_utils import requires
import sentencepiece as spm
@requires(backends=('sentencepiece',))
class SeamlessM4TTokenizer(PreTrainedTokenizer):
"""
Construct a SeamlessM4T tokenizer.
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
code> <tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import SeamlessM4TTokenizer
>>> tokenizer = SeamlessM4TTokenizer.from_pretrained(
... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
```
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
src_lang (`str`, *optional*, defaults to `"eng"`):
The language to use as source language for translation.
tgt_lang (`str`, *optional*, defaults to `"fra"`):
The language to use as target language for translation.
sp_model_kwargs (`dict[str, Any]`, *optional*):
Additional keyword arguments to pass to the model initialization.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens. Can be used to specify the list of languages that will be
supported by the tokenizer.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', tokenizer_file=None, src_lang='eng', tgt_lang='fra', sp_model_kwargs: Optional[dict[str, Any]]=None, additional_special_tokens=None, add_prefix_space=True, **kwargs):
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.legacy = False
self.vocab_file = vocab_file
self.sp_model = self.get_spm_processor(kwargs.pop('from_slow', False))
self._added_tokens_decoder = {0: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token, 1: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token, 2: AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token, 3: AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token}
self.fairseq_offset = 1
self.sp_model_size = len(self.sp_model)
self._src_lang = f'__{src_lang}__' if '__' not in src_lang else src_lang
self._tgt_lang = f'__{tgt_lang}__' if '__' not in tgt_lang else tgt_lang
self.add_prefix_space = add_prefix_space
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, add_prefix_space=add_prefix_space, **kwargs)
self.set_src_lang_special_tokens(self._src_lang)
self.set_tgt_lang_special_tokens(self._tgt_lang)
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
state['sp_model_proto'] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def vocab_size(self):
return len(self.sp_model)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, src_lang: Optional[str]=None, tgt_lang: Optional[str]=None, **kwargs):
"""
Args:
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
src_lang (`str`, *optional*):
A string representing the source language. If not specified, the last `src_lang` specified (either
during initialization or when calling this tokenizer) will be used.
tgt_lang (`str`, *optional*):
A string representing the target language. If not specified, the last `tgt_lang` specified (either
during initialization or when calling this tokenizer) will be used.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizer.__call__`].
"""
if src_lang is not None:
self.src_lang = src_lang
if tgt_lang is not None:
self.tgt_lang = tgt_lang
output = super().__call__(text=text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, padding=padding, pad_to_multiple_of=pad_to_multiple_of, **kwargs)
return BatchEncoding(output, tensor_type=kwargs.get('return_tensors'))
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
if '__' not in new_src_lang:
self._src_lang = f'__{new_src_lang}__'
else:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def tgt_lang(self) -> str:
return self._tgt_lang
@tgt_lang.setter
def tgt_lang(self, new_tgt_lang: str) -> None:
if '__' not in new_tgt_lang:
self._tgt_lang = f'__{new_tgt_lang}__'
else:
self._tgt_lang = new_tgt_lang
self.set_tgt_lang_special_tokens(self._tgt_lang)
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + [0] * len(token_ids_0) + suffix_ones
return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `X [eos, src_lang_code]`
- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
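# Illustration with made-up token ids: the method simply returns
# prefix_tokens + token_ids_0 (+ token_ids_1) + suffix_tokens. With the source-language setting
# described later (Prefix=[src_lang_code], suffix=[eos]), an encoded pair of tokens becomes
# [src_lang_code_id, t1, t2, eos_id].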
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model.')
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
if '__' not in tgt_lang:
tgt_lang = f'__{tgt_lang}__'
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs['forced_bos_token_id'] = tgt_lang_id
return inputs
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.fairseq_offset, self.vocab_size + self.fairseq_offset)}
vocab.update(self.added_tokens_encoder)
return vocab
@property
def unk_token_length(self):
return len(self.sp_model.encode(str(self.unk_token)))
def get_spm_processor(self, from_slow=False):
tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
if self.legacy or from_slow:
tokenizer.Load(self.vocab_file)
return tokenizer
with open(self.vocab_file, 'rb') as f:
sp_model = f.read()
model_pb2 = import_protobuf(f'The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)')
model = model_pb2.ModelProto.FromString(sp_model)
normalizer_spec = model_pb2.NormalizerSpec()
normalizer_spec.add_dummy_prefix = False
model.normalizer_spec.MergeFrom(normalizer_spec)
sp_model = model.SerializeToString()
tokenizer.LoadFromSerializedProto(sp_model)
return tokenizer
def tokenize(self, text: 'TextInput', **kwargs) -> list[str]:
"""
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
"""
if self.legacy or len(text) == 0:
return super().tokenize(text, **kwargs)
text = text.replace(SPIECE_UNDERLINE, ' ')
if self.add_prefix_space:
text = SPIECE_UNDERLINE + text
tokens = super().tokenize(text, **kwargs)
if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and (tokens[1] in self.all_special_tokens):
tokens = tokens[1:]
return tokens
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
if self.legacy or not text.startswith((SPIECE_UNDERLINE, ' ')):
return self.sp_model.encode(text, out_type=str)
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
return tokens[self.unk_token_length:] if len(tokens) >= self.unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
spm_id = self.sp_model.PieceToId(token)
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
tokens[0] = tokens[0][1:]
out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra', **kwargs) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting.
Prefix=[src_lang_code], suffix = [eos]
"""
self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
self.init_kwargs['src_lang'] = src_lang
if self.cur_lang_code == self.unk_token_id:
logger.warning_once(f'`src_lang={src_lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id.')
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target lang setting.
Prefix=[eos, tgt_lang_code] and suffix=[eos].
"""
self.cur_lang_code = self.convert_tokens_to_ids(lang)
self.init_kwargs['tgt_lang'] = lang
if self.cur_lang_code == self.unk_token_id:
logger.warning_once(f'`tgt_lang={lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id.')
self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
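To make the special-token bookkeeping above concrete, here is a minimal usage sketch (not part of the original file; the checkpoint name is the one quoted in the class docstring, and the exact tokens printed depend on the vocabulary). Encoder inputs follow `[src_lang_code] X [eos]`, while targets follow `[eos, tgt_lang_code] X [eos]`:
```python
# Illustrative sketch only; checkpoint name taken from the class docstring.
from transformers import SeamlessM4TTokenizer

tokenizer = SeamlessM4TTokenizer.from_pretrained(
    "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
)

# Encoder side: prefix=[src_lang_code], suffix=[eos]  ->  "__eng__ ... </s>"
enc = tokenizer("Hello world", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"][0].tolist()))

# Target side: prefix=[eos, tgt_lang_code], suffix=[eos]  ->  "</s> __fra__ ... </s>"
batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(batch["labels"][0].tolist()))
```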
|
@requires(backends=('sentencepiece',))
class SeamlessM4TTokenizer(PreTrainedTokenizer):
'''
Construct a SeamlessM4T tokenizer.
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
code> <tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import SeamlessM4TTokenizer
>>> tokenizer = SeamlessM4TTokenizer.from_pretrained(
... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
```
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
src_lang (`str`, *optional*, defaults to `"eng"`):
The language to use as source language for translation.
tgt_lang (`str`, *optional*, defaults to `"fra"`):
The language to use as target language for translation.
sp_model_kwargs (`dict[str, Any]`, *optional*):
Additional keyword arguments to pass to the `SentencePieceProcessor` initialization.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens. Can be used to specify the list of languages that will be
supported by the tokenizer.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows treating the leading word just as any
other word.
'''
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', tokenizer_file=None, src_lang='eng', tgt_lang='fra', sp_model_kwargs: Optional[dict[str, Any]]=None, additional_special_tokens=None, add_prefix_space=True, **kwargs):
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
@property
def vocab_size(self):
pass
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, src_lang: Optional[str]=None, tgt_lang: Optional[str]=None, **kwargs):
'''
Args:
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
src_lang (`str`, *optional*):
A string representing the source language. If not specified, the last `src_lang` specified (either
during initialization or when calling this tokenizer) will be used.
tgt_lang (`str`, *optional*):
A string representing the target language. If not specified, the last `tgt_lang` specified (either
during initialization or when calling this tokenizer) will be used.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizer.__call__`].
'''
pass
@property
def src_lang(self) -> str:
pass
@src_lang.setter
def src_lang(self) -> str:
pass
@property
def tgt_lang(self) -> str:
pass
@tgt_lang.setter
def tgt_lang(self) -> str:
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A SeamlessM4T sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. SeamlessM4T does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
'''Used by translation pipeline, to prepare inputs for the generate function'''
pass
def get_vocab(self):
pass
@property
def unk_token_length(self):
pass
def get_spm_processor(self, from_slow=False):
pass
def tokenize(self, text: 'TextInput', **kwargs) -> list[str]:
'''
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
'''
pass
def _tokenize(self, text, **kwargs):
'''
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings for sub-words) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra', **kwargs) -> BatchEncoding:
pass
def _switch_to_input_mode(self):
pass
def _switch_to_target_mode(self):
pass
def set_src_lang_special_tokens(self, src_lang) -> None:
'''Reset the special tokens to the source lang setting.
Prefix=[src_lang_code], suffix = [eos]
'''
pass
def set_tgt_lang_special_tokens(self, lang: str) -> None:
'''Reset the special tokens to the target lang setting.
Prefix=[eos, tgt_lang_code] and suffix=[eos].
'''
pass
| 35
| 13
| 15
| 2
| 9
| 4
| 2
| 0.75
| 1
| 8
| 1
| 0
| 27
| 12
| 27
| 116
| 520
| 84
| 250
| 116
| 172
| 187
| 160
| 64
| 132
| 8
| 3
| 2
| 59
|
5,149
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py
|
transformers.models.seamless_m4t.tokenization_seamless_m4t_fast.SeamlessM4TTokenizerFast
|
from tokenizers import processors
from ...utils import PaddingStrategy, is_sentencepiece_available, logging
from shutil import copyfile
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...tokenization_utils import BatchEncoding, PreTokenizedInput, TextInput
from typing import Optional, Union
import os
class SeamlessM4TTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" SeamlessM4T tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
code> <tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import SeamlessM4TTokenizerFast
>>> tokenizer = SeamlessM4TTokenizerFast.from_pretrained(
... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
```
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
src_lang (`str`, *optional*, defaults to `"eng"`):
The language to use as source language for translation.
tgt_lang (`str`, *optional*, defaults to `"fra"`):
The language to use as target language for translation.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = SeamlessM4TTokenizer
model_input_names = ['input_ids', 'attention_mask']
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', src_lang='eng', tgt_lang='fra', additional_special_tokens=None, **kwargs):
super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs)
self.vocab_file = vocab_file
self._src_lang = f'__{src_lang}__' if '__' not in src_lang else src_lang
self._tgt_lang = f'__{tgt_lang}__' if '__' not in tgt_lang else tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
self.set_tgt_lang_special_tokens(self._tgt_lang)
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
if '__' not in new_src_lang:
self._src_lang = f'__{new_src_lang}__'
else:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def tgt_lang(self) -> str:
return self._tgt_lang
@tgt_lang.setter
def tgt_lang(self, new_tgt_lang: str) -> None:
if '__' not in new_tgt_lang:
self._tgt_lang = f'__{new_tgt_lang}__'
else:
self._tgt_lang = new_tgt_lang
self.set_tgt_lang_special_tokens(self._tgt_lang)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. The special tokens depend on calling set_lang.
A SeamlessM4T sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. SeamlessM4T does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
if '__' not in tgt_lang:
tgt_lang = f'__{tgt_lang}__'
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs['forced_bos_token_id'] = tgt_lang_id
return inputs
def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra', **kwargs) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting.
Prefix=[src_lang_code], suffix = [eos]
"""
self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
if self.cur_lang_code == self.unk_token_id:
logger.warning_once(f'`src_lang={src_lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id.')
self.init_kwargs['src_lang'] = src_lang
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)))
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target lang setting.
Prefix=[eos, tgt_lang_code] and suffix=[eos].
"""
self.cur_lang_code = self.convert_tokens_to_ids(lang)
if self.cur_lang_code == self.unk_token_id:
logger.warning_once(f'`tgt_lang={lang}` has not been found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id.')
self.init_kwargs['tgt_lang'] = lang
self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)))
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
@classmethod
def _from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=None, cache_dir=None, local_files_only=False, _commit_hash=None, _is_local=False, **kwargs):
tokenizer = super()._from_pretrained(resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=_commit_hash, _is_local=_is_local, **kwargs)
tokenizer.set_src_lang_special_tokens(tokenizer._src_lang)
tokenizer.set_tgt_lang_special_tokens(tokenizer._tgt_lang)
return tokenizer
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, src_lang: Optional[str]=None, tgt_lang: Optional[str]=None, **kwargs):
"""
Args:
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
src_lang (`str`, *optional*):
A string representing the source language. If not specified, the last `src_lang` specified (either
during initialization or when calling this tokenizer) will be used.
tgt_lang (`str`, *optional*):
A string representing the target language. If not specified, the last `tgt_lang` specified (either
during initialization or when calling this tokenizer) will be used.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizerFast.__call__`].
"""
if src_lang is not None:
self.src_lang = src_lang
if tgt_lang is not None:
self.tgt_lang = tgt_lang
output = super().__call__(text=text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, padding=padding, pad_to_multiple_of=pad_to_multiple_of, **kwargs)
return output
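A short, hedged sketch of how the fast tokenizer is typically driven (not part of the original file; the checkpoint name comes from the class docstring, and `_build_translation_inputs` is normally invoked by the translation pipeline rather than called directly by users):
```python
# Illustrative sketch only; checkpoint name taken from the class docstring.
from transformers import SeamlessM4TTokenizerFast

tokenizer = SeamlessM4TTokenizerFast.from_pretrained("facebook/hf-seamless-m4t-medium")

# Per-call language override: __call__ resets src_lang/tgt_lang (and the post-processor) before encoding.
inputs = tokenizer(
    "UN Chief says there is no military solution in Syria",
    src_lang="eng", tgt_lang="fra", return_tensors="pt",
)

# Translation-pipeline style preparation: additionally records the forced BOS id for the target language,
# which a downstream `generate` call can consume.
gen_inputs = tokenizer._build_translation_inputs(
    "UN Chief says there is no military solution in Syria",
    return_tensors="pt", src_lang="eng", tgt_lang="fra",
)
print(gen_inputs["forced_bos_token_id"])  # id of the "__fra__" token
```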
|
class SeamlessM4TTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" SeamlessM4T tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
code> <tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import SeamlessM4TTokenizerFast
>>> tokenizer = SeamlessM4TTokenizerFast.from_pretrained(
... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
```
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
src_lang (`str`, *optional*, defaults to `"eng"`):
The language to use as source language for translation.
tgt_lang (`str`, *optional*, defaults to `"fra"`):
The language to use as target language for translation.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', src_lang='eng', tgt_lang='fra', additional_special_tokens=None, **kwargs):
pass
@property
def src_lang(self) -> str:
pass
@src_lang.setter
def src_lang(self) -> str:
pass
@property
def tgt_lang(self) -> str:
pass
@tgt_lang.setter
def tgt_lang(self) -> str:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. The special tokens depend on calling set_lang.
A SeamlessM4T sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. SeamlessM4T does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
'''Used by translation pipeline, to prepare inputs for the generate function'''
pass
def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra', **kwargs) -> BatchEncoding:
pass
def _switch_to_input_mode(self):
pass
def _switch_to_target_mode(self):
pass
def set_src_lang_special_tokens(self, src_lang) -> None:
'''Reset the special tokens to the source lang setting.
Prefix=[src_lang_code], suffix = [eos]
'''
pass
def set_tgt_lang_special_tokens(self, lang: str) -> None:
'''Reset the special tokens to the target lang setting.
Prefix=[eos, tgt_lang_code] and suffix=[eos].
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
@classmethod
def _from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=None, cache_dir=None, local_files_only=False, _commit_hash=None, _is_local=False, **kwargs):
pass
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, src_lang: Optional[str]=None, tgt_lang: Optional[str]=None, **kwargs):
'''
Args:
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
src_lang (`str`, *optional*):
A string representing the source language. If not specified, the last `src_lang` specified (either
during initialization or when calling this tokenizer) will be used.
tgt_lang (`str`, *optional*):
A string representing the target language. If not specified, the last `tgt_lang` specified (either
during initialization or when calling this tokenizer) will be used.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizerFast.__call__`].
'''
pass
| 22
| 7
| 18
| 2
| 12
| 4
| 2
| 0.65
| 1
| 8
| 1
| 0
| 16
| 4
| 17
| 105
| 406
| 63
| 208
| 96
| 132
| 135
| 99
| 38
| 81
| 5
| 3
| 1
| 34
|
5,150
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.configuration_seamless_m4t_v2.SeamlessM4Tv2Config
|
from ...configuration_utils import PretrainedConfig
class SeamlessM4Tv2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. It is used to instantiate
an SeamlessM4Tv2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the default SeamlessM4Tv2 architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256102):
Vocabulary size of the text modality of the SeamlessM4Tv2 model. Defines the number of different tokens
that can be represented by the `inputs_ids` passed when calling [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForTextToSpeech`] or [`~SeamlessM4Tv2ForTextToText`].
t2u_vocab_size (`int`, *optional*, defaults to 10082):
Unit vocabulary size of the SeamlessM4Tv2 model. Defines the number of different "unit tokens" that can be
represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
char_vocab_size (`int`, *optional*, defaults to 10943):
Character vocabulary size of the SeamlessM4Tv2 model. Defines the number of different character tokens that
can be represented by the `char_inputs_ids` passed when calling the Text-To-Units sub-model of
[`~SeamlessM4Tv2Model`], [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
> Parameters shared across sub-models
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" layers in the architecture.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model text encoder and decoder might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
encoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the encoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the decoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
`"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all attention layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all activation layers in the model.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by dividing by sqrt(d_model).
> Text encoder and text decoder specific parameters
encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text encoder.
decoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text decoder.
decoder_start_token_id (`int`, *optional*, defaults to 3):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied in the text decoder.
max_new_tokens (`int`, *optional*, defaults to 256):
The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. Only applied to the text-decoder model.
bos_token_id (`int`, *optional*, defaults to 2):
The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
eos_token_id (`int`, *optional*, defaults to 3):
The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
> Speech encoder specific parameters
speech_encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer speech encoder.
speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer speech encoder.
speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
`"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all layers in the speech encoder.
add_adapter (`bool`, *optional*, defaults to `True`):
Add an adapter layer on top of the speech encoder.
speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the speech encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details.
feature_projection_input_dim (`int`, *optional*, defaults to 160):
Input dimension of the input feature projection of the speech encoder, i.e the dimension after processing
input audios with [`SeamlessM4TFeatureExtractor`].
adaptor_kernel_size (`int`, *optional*, defaults to 8):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_stride (`int`, *optional*, defaults to 8):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all layers in the speech adapter.
num_adapter_layers (`int`, *optional*, defaults to 1):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`):
Can be specified to `relative_key`. If left to `None`, no relative position embedding is applied. Only
applied to the speech encoder. For more information on `"relative_key"`, please refer to [Self-Attention
with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
left_max_position_embeddings (`int`, *optional*, defaults to 64):
The left clipping value for relative positions.
right_max_position_embeddings (`int`, *optional*, defaults to 8):
The right clipping value for relative positions.
speech_encoder_chunk_size (`int`, *optional*, defaults to 20000): The size of each attention chunk.
speech_encoder_left_chunk_num (`int`, *optional*, defaults to 128):
Number of chunks on the left up to which lookahead is allowed.
> Text-To-Unit (t2u) model specific parameters
t2u_bos_token_id (`int`, *optional*, defaults to 0):
The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_pad_token_id (`int`, *optional*, defaults to 1):
The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_eos_token_id (`int`, *optional*, defaults to 2):
The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_encoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit encoder.
t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
t2u_decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit decoder.
t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
t2u_max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model text-to-unit component might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
t2u_variance_predictor_embed_dim (`int`, *optional*, defaults to 1024):
The projection dimension of the text-to-unit's duration predictor.
t2u_variance_predictor_hidden_dim (`int`, *optional*, defaults to 256):
Internal dimension of the text-to-unit's duration predictor.
t2u_variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers of the text-to-unit's duration predictor.
t2u_variance_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the text-to-unit's duration predictor.
> Hifi-Gan Vocoder specific parameters
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*. Applies to the vocoder only.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
the length of *upsample_rates*. Applies to the vocoder only.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
field fusion (MRF) module. Applies to the vocoder only.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
only.
unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
Vocabulary size of the SeamlessM4Tv2 vocoder. Defines the number of different unit tokens that can be
represented by the `inputs_ids` passed when calling the vocoder of [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
unit_embed_dim (`int`, *optional*, defaults to 1280):
The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
lang_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
spkr_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
vocoder_num_langs (`int`, *optional*, defaults to 36):
Number of langs supported by the vocoder. Might be different from `t2u_num_langs`.
vocoder_num_spkrs (`int`, *optional*, defaults to 200):
Number of speakers supported by the vocoder.
variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the duration predictor. Applies to the vocoder only.
var_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the duration predictor. Applies to the vocoder only.
vocoder_offset (`int`, *optional*, defaults to 4):
Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
```python
>>> from transformers import SeamlessM4Tv2Model, SeamlessM4Tv2Config
>>> # Initializing a SeamlessM4Tv2 style configuration
>>> configuration = SeamlessM4Tv2Config()
>>> # Initializing a model from that configuration
>>> model = SeamlessM4Tv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'seamless_m4t_v2'
def __init__(self, vocab_size=256102, t2u_vocab_size=10082, char_vocab_size=10943, hidden_size=1024, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, max_position_embeddings=4096, is_encoder_decoder=True, encoder_layerdrop=0.05, decoder_layerdrop=0.05, activation_function='relu', dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, scale_embedding=True, encoder_layers=24, encoder_ffn_dim=8192, encoder_attention_heads=16, decoder_layers=24, decoder_ffn_dim=8192, decoder_attention_heads=16, decoder_start_token_id=3, max_new_tokens=256, pad_token_id=0, bos_token_id=2, eos_token_id=3, speech_encoder_layers=24, speech_encoder_attention_heads=16, speech_encoder_intermediate_size=4096, speech_encoder_hidden_act='swish', speech_encoder_dropout=0.0, add_adapter=True, speech_encoder_layerdrop=0.1, feature_projection_input_dim=160, adaptor_kernel_size=8, adaptor_stride=8, adaptor_dropout=0.1, num_adapter_layers=1, position_embeddings_type='relative_key', conv_depthwise_kernel_size=31, left_max_position_embeddings=64, right_max_position_embeddings=8, speech_encoder_chunk_size=20000, speech_encoder_left_chunk_num=128, t2u_bos_token_id=0, t2u_pad_token_id=1, t2u_eos_token_id=2, t2u_encoder_layers=6, t2u_encoder_ffn_dim=8192, t2u_encoder_attention_heads=16, t2u_decoder_layers=6, t2u_decoder_ffn_dim=8192, t2u_decoder_attention_heads=16, t2u_max_position_embeddings=4096, t2u_variance_predictor_embed_dim=1024, t2u_variance_predictor_hidden_dim=256, t2u_variance_predictor_kernel_size=3, t2u_variance_pred_dropout=0.5, sampling_rate=16000, upsample_initial_channel=512, upsample_rates=[5, 4, 4, 2, 2], upsample_kernel_sizes=[11, 8, 8, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_slope=0.1, unit_hifi_gan_vocab_size=10000, unit_embed_dim=1280, lang_embed_dim=256, spkr_embed_dim=256, vocoder_num_langs=36, vocoder_num_spkrs=200, variance_predictor_kernel_size=3, var_pred_dropout=0.5, vocoder_offset=4, **kwargs):
self.vocab_size = vocab_size
self.t2u_vocab_size = t2u_vocab_size
self.char_vocab_size = char_vocab_size
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.max_position_embeddings = max_position_embeddings
self.use_cache = use_cache
self.max_new_tokens = max_new_tokens
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.scale_embedding = scale_embedding
self.num_attention_heads = decoder_attention_heads
self.num_hidden_layers = decoder_layers
self.encoder_layers = encoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
self.speech_encoder_layers = speech_encoder_layers
self.speech_encoder_hidden_act = speech_encoder_hidden_act
self.speech_encoder_dropout = speech_encoder_dropout
self.speech_encoder_attention_heads = speech_encoder_attention_heads
self.speech_encoder_layerdrop = speech_encoder_layerdrop
self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
self.feature_projection_input_dim = feature_projection_input_dim
self.adaptor_kernel_size = adaptor_kernel_size
self.adaptor_stride = adaptor_stride
self.adaptor_dropout = adaptor_dropout
self.num_adapter_layers = num_adapter_layers
self.position_embeddings_type = position_embeddings_type
self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
self.add_adapter = add_adapter
self.left_max_position_embeddings = left_max_position_embeddings
self.right_max_position_embeddings = right_max_position_embeddings
self.speech_encoder_chunk_size = speech_encoder_chunk_size
self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num
self.t2u_bos_token_id = t2u_bos_token_id
self.t2u_pad_token_id = t2u_pad_token_id
self.t2u_eos_token_id = t2u_eos_token_id
self.t2u_encoder_layers = t2u_encoder_layers
self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
self.t2u_decoder_layers = t2u_decoder_layers
self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
self.t2u_max_position_embeddings = t2u_max_position_embeddings
self.t2u_variance_predictor_embed_dim = t2u_variance_predictor_embed_dim
self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim
self.t2u_variance_predictor_kernel_size = t2u_variance_predictor_kernel_size
self.t2u_variance_pred_dropout = t2u_variance_pred_dropout
self.sampling_rate = sampling_rate
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.leaky_relu_slope = leaky_relu_slope
self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
self.unit_embed_dim = unit_embed_dim
self.lang_embed_dim = lang_embed_dim
self.spkr_embed_dim = spkr_embed_dim
self.vocoder_num_langs = vocoder_num_langs
self.vocoder_num_spkrs = vocoder_num_spkrs
self.variance_predictor_kernel_size = variance_predictor_kernel_size
self.var_pred_dropout = var_pred_dropout
self.vocoder_offset = vocoder_offset
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, max_position_embeddings=max_position_embeddings, **kwargs)
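As a quick illustration of the configuration API documented above, here is a minimal sketch (assuming the `transformers` package exposing `SeamlessM4Tv2Config` is installed) that overrides a few vocoder defaults while keeping everything else as listed:
```python
from transformers import SeamlessM4Tv2Config

# A minimal sketch: override a handful of vocoder-related defaults documented above.
config = SeamlessM4Tv2Config(
    sampling_rate=16000,      # output audio sampling rate in Hz
    leaky_relu_slope=0.1,     # negative slope of the vocoder's leaky ReLU
    vocoder_num_spkrs=200,    # number of speakers the vocoder supports
    vocoder_offset=4,         # unit ids are shifted by this amount to skip symbol tokens
)

# total upsampling factor of the HiFi-GAN stack: 5 * 4 * 4 * 2 * 2 = 320
print(config.upsample_rates)
```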
|
class SeamlessM4Tv2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. It is used to instantiate
an SeamlessM4Tv2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SeamlessM4Tv2
[""](https://huggingface.co/"") architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256102):
Vocabulary size of the text modality of the SeamlessM4Tv2 model. Defines the number of different tokens
that can be represented by the `input_ids` passed when calling [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForTextToSpeech`] or [`~SeamlessM4Tv2ForTextToText`].
t2u_vocab_size (`int`, *optional*, defaults to 10082):
Unit vocabulary size of the SeamlessM4Tv2 model. Defines the number of different "unit tokens" that can be
represented by the `input_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
char_vocab_size (`int`, *optional*, defaults to 10943):
Character vocabulary size of the SeamlessM4Tv2 model. Defines the number of different character tokens that
can be represented by the `char_input_ids` passed when calling the Text-To-Units sub-model of
[`~SeamlessM4Tv2Model`], [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
> Parameters shared across sub-models
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" layers in the architecture.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model's text encoder and decoder might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
encoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the encoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.05):
The LayerDrop probability for the decoders. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
`"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all attention layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all activation layers in the model.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by dividing by sqrt(d_model).
> Text encoder and text decoder specific parameters
encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text encoder.
decoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer text decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text decoder.
decoder_start_token_id (`int`, *optional*, defaults to 3):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
applied in the text decoder.
max_new_tokens (`int`, *optional*, defaults to 256):
The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. Only applied to the text-decoder model.
bos_token_id (`int`, *optional*, defaults to 2):
The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
eos_token_id (`int`, *optional*, defaults to 3):
The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
> Speech encoder specific parameters
speech_encoder_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer speech encoder.
speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer speech encoder.
speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
`"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all layers in the speech encoder.
add_adapter (`bool`, *optional*, defaults to `True`):
Add an adapter layer on top of the speech encoder.
speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the speech encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
feature_projection_input_dim (`int`, *optional*, defaults to 160):
Input dimension of the input feature projection of the speech encoder, i.e. the dimension after processing
input audios with [`SeamlessM4TFeatureExtractor`].
adaptor_kernel_size (`int`, *optional*, defaults to 8):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_stride (`int`, *optional*, defaults to 8):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adaptor_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all layers in the speech adapter.
num_adapter_layers (`int`, *optional*, defaults to 1):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`):
Can be set to `relative_key`. If left as `None`, no relative position embedding is applied. Only
applied to the speech encoder. For more information on `"relative_key"`, please refer to [Self-Attention
with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
left_max_position_embeddings (`int`, *optional*, defaults to 64):
The left clipping value for relative positions.
right_max_position_embeddings (`int`, *optional*, defaults to 8):
The right clipping value for relative positions.
speech_encoder_chunk_size (`int`, *optional*, defaults to 20000): The size of each attention chunk.
speech_encoder_left_chunk_num (`int`, *optional*, defaults to 128):
Number of chunks on the left up to which lookahead is allowed.
> Text-To-Unit (t2u) model specific parameters
t2u_bos_token_id (`int`, *optional*, defaults to 0):
The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_pad_token_id (`int`, *optional*, defaults to 1):
The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_eos_token_id (`int`, *optional*, defaults to 2):
The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
t2u_encoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit encoder.
t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
t2u_decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer text-to-unit decoder.
t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
t2u_max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model's text-to-unit component might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
t2u_variance_predictor_embed_dim (`int`, *optional*, defaults to 1024):
The projection dimension of the text-to-unit's duration predictor.
t2u_variance_predictor_hidden_dim (`int`, *optional*, defaults to 256):
Internal dimension of the text-to-unit's duration predictor.
t2u_variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers of the text-to-unit's duration predictor.
t2u_variance_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the text-to-unit's duration predictor.
> Hifi-Gan Vocoder specific parameters
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*. Applies to the vocoder only.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
the length of *upsample_rates*. Applies to the vocoder only.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
field fusion (MRF) module. Applies to the vocoder only.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
only.
unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
Vocabulary size of the SeamlessM4Tv2 vocoder. Defines the number of different unit tokens that can be
represented by the `input_ids` passed when calling the vocoder of [`~SeamlessM4Tv2Model`],
[`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
unit_embed_dim (`int`, *optional*, defaults to 1280):
The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
lang_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
spkr_embed_dim (`int`, *optional*, defaults to 256):
The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
vocoder_num_langs (`int`, *optional*, defaults to 36):
Number of langs supported by the vocoder. Might be different from `t2u_num_langs`.
vocoder_num_spkrs (`int`, *optional*, defaults to 200):
Number of speakers supported by the vocoder.
variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the duration predictor. Applies to the vocoder only.
var_pred_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability of the duration predictor. Applies to the vocoder only.
vocoder_offset (`int`, *optional*, defaults to 4):
Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
```python
>>> from transformers import SeamlessM4Tv2Model, SeamlessM4Tv2Config
>>> # Initializing a SeamlessM4Tv2 "" style configuration
>>> configuration = SeamlessM4Tv2Config()
>>> # Initializing a model from the "" style configuration
>>> model = SeamlessM4Tv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=256102, t2u_vocab_size=10082, char_vocab_size=10943, hidden_size=1024, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, max_position_embeddings=4096, is_encoder_decoder=True, encoder_layerdrop=0.05, decoder_layerdrop=0.05, activation_function='relu', dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, scale_embedding=True, encoder_layers=24, encoder_ffn_dim=8192, encoder_attention_heads=16, decoder_layers=24, decoder_ffn_dim=8192, decoder_attention_heads=16, decoder_start_token_id=3, max_new_tokens=256, pad_token_id=0, bos_token_id=2, eos_token_id=3, speech_encoder_layers=24, speech_encoder_attention_heads=16, speech_encoder_intermediate_size=4096, speech_encoder_hidden_act='swish', speech_encoder_dropout=0.0, add_adapter=True, speech_encoder_layerdrop=0.1, feature_projection_input_dim=160, adaptor_kernel_size=8, adaptor_stride=8, adaptor_dropout=0.1, num_adapter_layers=1, position_embeddings_type='relative_key', conv_depthwise_kernel_size=31, left_max_position_embeddings=64, right_max_position_embeddings=8, speech_encoder_chunk_size=20000, speech_encoder_left_chunk_num=128, t2u_bos_token_id=0, t2u_pad_token_id=1, t2u_eos_token_id=2, t2u_encoder_layers=6, t2u_encoder_ffn_dim=8192, t2u_encoder_attention_heads=16, t2u_decoder_layers=6, t2u_decoder_ffn_dim=8192, t2u_decoder_attention_heads=16, t2u_max_position_embeddings=4096, t2u_variance_predictor_embed_dim=1024, t2u_variance_predictor_hidden_dim=256, t2u_variance_predictor_kernel_size=3, t2u_variance_pred_dropout=0.5, sampling_rate=16000, upsample_initial_channel=512, upsample_rates=[5, 4, 4, 2, 2], upsample_kernel_sizes=[11, 8, 8, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_slope=0.1, unit_hifi_gan_vocab_size=10000, unit_embed_dim=1280, lang_embed_dim=256, spkr_embed_dim=256, vocoder_num_langs=36, vocoder_num_spkrs=200, variance_predictor_kernel_size=3, var_pred_dropout=0.5, vocoder_offset=4, **kwargs):
pass
| 2
| 1
| 180
| 6
| 160
| 18
| 1
| 1.33
| 1
| 1
| 0
| 0
| 1
| 72
| 1
| 1
| 399
| 25
| 162
| 153
| 82
| 216
| 76
| 75
| 74
| 1
| 1
| 0
| 1
|
5,151
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.HifiGanResidualBlock
|
from torch import Tensor, nn
class HifiGanResidualBlock(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
super().__init__()
self.leaky_relu_slope = leaky_relu_slope
self.convs1 = nn.ModuleList([nn.Conv1d(channels, channels, kernel_size, stride=1, dilation=dilation[i], padding=self.get_padding(kernel_size, dilation[i])) for i in range(len(dilation))])
self.convs2 = nn.ModuleList([nn.Conv1d(channels, channels, kernel_size, stride=1, dilation=1, padding=self.get_padding(kernel_size, 1)) for _ in range(len(dilation))])
def get_padding(self, kernel_size, dilation=1):
return (kernel_size * dilation - dilation) // 2
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, 'weight_norm'):
weight_norm = nn.utils.parametrizations.weight_norm
for layer in self.convs1:
weight_norm(layer)
for layer in self.convs2:
weight_norm(layer)
def remove_weight_norm(self):
for layer in self.convs1:
nn.utils.remove_weight_norm(layer)
for layer in self.convs2:
nn.utils.remove_weight_norm(layer)
def forward(self, hidden_states):
for conv1, conv2 in zip(self.convs1, self.convs2):
residual = hidden_states
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = conv1(hidden_states)
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = conv2(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
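The `get_padding` helper above implements "SAME"-style padding for a stride-1 dilated convolution; a small standalone check (torch only, toy shapes) that the time dimension is preserved for each dilation used in the block:
```python
import torch
from torch import nn

def get_padding(kernel_size, dilation=1):
    # same formula as HifiGanResidualBlock.get_padding
    return (kernel_size * dilation - dilation) // 2

x = torch.randn(1, 8, 100)  # (batch, channels, time)
for dilation in (1, 3, 5):
    conv = nn.Conv1d(8, 8, kernel_size=3, stride=1, dilation=dilation,
                     padding=get_padding(3, dilation))
    # output length = 100 + 2*pad - dilation*(kernel_size - 1) = 100
    assert conv(x).shape[-1] == x.shape[-1]
```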
|
class HifiGanResidualBlock(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
pass
def get_padding(self, kernel_size, dilation=1):
pass
def apply_weight_norm(self):
pass
def remove_weight_norm(self):
pass
def forward(self, hidden_states):
pass
| 6
| 0
| 11
| 0
| 11
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 5
| 3
| 5
| 15
| 60
| 6
| 54
| 15
| 48
| 0
| 30
| 14
| 24
| 4
| 1
| 1
| 11
|
5,152
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2Attention
|
from torch import Tensor, nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from ...utils.deprecation import deprecate_kwarg
class SeamlessM4Tv2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[SeamlessM4Tv2Config]=None, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = encoder_hidden_states is not None
batch_size, seq_length = hidden_states.shape[:2]
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
query_states = self.q_proj(hidden_states)
query_states = query_states.reshape(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states * self.scaling
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
context_states = torch.matmul(attn_weights, value_states)
context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
attn_output = self.out_proj(context_states)
return (attn_output, attn_weights)
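A shape-level sketch of the attention math in `forward` above, with the projections and caching stripped out (standalone, torch only, toy dimensions):
```python
import torch

batch, seq, embed_dim, num_heads = 2, 5, 16, 4
head_dim = embed_dim // num_heads

# queries are pre-scaled by head_dim ** -0.5, as in the module above
q = torch.randn(batch, num_heads, seq, head_dim) * head_dim ** -0.5
k = torch.randn(batch, num_heads, seq, head_dim)
v = torch.randn(batch, num_heads, seq, head_dim)

scores = torch.matmul(q, k.transpose(-1, -2))              # (batch, heads, seq, seq)
weights = scores.float().softmax(dim=-1).type_as(scores)   # softmax computed in float32
context = torch.matmul(weights, v)                         # (batch, heads, seq, head_dim)
out = context.permute(0, 2, 1, 3).reshape(batch, seq, embed_dim)
print(out.shape)  # torch.Size([2, 5, 16])
```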
|
class SeamlessM4Tv2Attention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[SeamlessM4Tv2Config]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 31
| 3
| 22
| 6
| 3
| 0.28
| 1
| 7
| 1
| 0
| 3
| 12
| 3
| 13
| 100
| 13
| 68
| 44
| 48
| 19
| 47
| 28
| 43
| 7
| 1
| 2
| 10
|
5,153
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2CodeHifiGan
|
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_utils import PreTrainedModel
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring(custom_intro='\n Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).\n ')
class SeamlessM4Tv2CodeHifiGan(PreTrainedModel):
config: SeamlessM4Tv2Config
main_input_name = 'input_embeds'
_no_split_modules = []
def __init__(self, config):
super().__init__(config)
self.pad_token_id = config.t2u_pad_token_id
embed_dim = config.unit_embed_dim
kernel_size = config.variance_predictor_kernel_size
var_pred_dropout = config.var_pred_dropout
self.dur_predictor = SeamlessM4Tv2VariancePredictor(embed_dim, embed_dim, kernel_size, var_pred_dropout)
self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim)
self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim)
self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim)
self.hifi_gan = SeamlessM4Tv2HifiGan(config)
self.post_init()
def _get_dur_output_lengths(self, input_ids, dur_out):
"""
Computes the output length after the duration layer.
"""
unit_lengths = (input_ids != self.pad_token_id).sum(1)
unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1)
cumulative_dur_out = torch.cumsum(dur_out, dim=1)
unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze()
return unit_lengths
def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the hifigan convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
return torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode='floor') + 1
def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1
input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
for i, (upsample_rate, kernel_size) in enumerate(zip(self.config.upsample_rates, self.config.upsample_kernel_sizes)):
input_lengths = _transpose_conv_out_length(input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2)
for i in range(len(self.config.upsample_rates)):
for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes):
for dil in dilation:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil)
for dil in dilation:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1)
input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
return input_lengths
def forward(self, input_ids: torch.LongTensor, speaker_id: torch.Tensor, lang_id: torch.Tensor) -> tuple[torch.Tensor]:
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4Tv2TextToUnitForConditionalGeneration`]. [What are input
IDs?](../glossary#input-ids)
speaker_id (`int`, *optional*):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
lang_id (`int`, *optional*):
The id of the target language used for speech synthesis. Must be lower than `config.vocoder_num_langs`.
"""
hidden_states = self.unit_embedding(input_ids).transpose(1, 2)
spkr = self.speaker_embedding(speaker_id).transpose(1, 2)
lang = self.language_embedding(lang_id).transpose(1, 2)
log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
if hidden_states.size(0) == 1:
hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
else:
if hidden_states.shape[0] > 1 and self.training:
logger.warning('`self.training=True` and you use batching. You lose parallelism during the hifigan\n forward pass because the samples are interleaved.')
hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1) for hidden_state, duration in zip(hidden_states, dur_out)]
hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)
spkr = spkr.repeat(1, 1, hidden_states.shape[-1])
lang = lang.repeat(1, 1, hidden_states.shape[-1])
hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)
hidden_states = self.hifi_gan(hidden_states)
unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)
lengths = self._get_output_hifigan_lengths(unit_lengths)
return (hidden_states, lengths)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, 'weight_norm'):
weight_norm = nn.utils.parametrizations.weight_norm
weight_norm(self.hifi_gan.conv_pre)
for layer in self.hifi_gan.upsampler:
weight_norm(layer)
for layer in self.hifi_gan.resblocks:
layer.apply_weight_norm()
weight_norm(self.hifi_gan.conv_post)
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.hifi_gan.conv_pre)
for layer in self.hifi_gan.upsampler:
nn.utils.remove_weight_norm(layer)
for layer in self.hifi_gan.resblocks:
layer.remove_weight_norm()
nn.utils.remove_weight_norm(self.hifi_gan.conv_post)
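The duration handling in `forward` above expands each unit embedding along the time axis according to its predicted duration; a tiny standalone sketch of that `repeat_interleave` step with made-up durations (torch only):
```python
import torch

# embeddings for 4 units, hidden dim 3, batch size 1 -> (batch, dim, units)
hidden_states = torch.arange(12.0).reshape(1, 3, 4)

# hypothetical predicted durations per unit, already clamped to >= 1
dur_out = torch.tensor([[2, 1, 3, 1]])

# each unit's column is repeated along the time axis by its duration
expanded = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
print(expanded.shape)  # torch.Size([1, 3, 7]) since 2 + 1 + 3 + 1 = 7
```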
|
@auto_docstring(custom_intro='\n Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).\n ')
class SeamlessM4Tv2CodeHifiGan(PreTrainedModel):
def __init__(self, config):
pass
def _get_dur_output_lengths(self, input_ids, dur_out):
'''
Computes the output length after the duration layer.
'''
pass
def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
'''
Computes the output length of the hifigan convolutional layers
'''
pass
def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
pass
def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
pass
def forward(self, input_ids: torch.LongTensor, speaker_id: torch.Tensor, lang_id: torch.Tensor) -> tuple[torch.Tensor]:
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4Tv2TextToUnitForConditionalGeneration`]. [What are input
IDs?](../glossary#input-ids)
speaker_id (`int`, *optional*):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
lang_id (`int`, *optional*):
The id of the target language used for speech synthesis. Must be lower than `config.vocoder_num_langs`.
'''
pass
def _init_weights(self, module: nn.Module):
'''Initialize the weights.'''
pass
def apply_weight_norm(self):
pass
def remove_weight_norm(self):
pass
| 11
| 4
| 17
| 3
| 11
| 3
| 3
| 0.34
| 1
| 8
| 2
| 0
| 7
| 6
| 7
| 7
| 164
| 30
| 100
| 39
| 88
| 34
| 82
| 37
| 72
| 6
| 1
| 3
| 25
|
5,154
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerAdapter
|
from torch import Tensor, nn
class SeamlessM4Tv2ConformerAdapter(nn.Module):
def __init__(self, config):
super().__init__()
self.layers = nn.ModuleList((SeamlessM4Tv2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers)))
def forward(self, hidden_states, attention_mask):
for layer in self.layers:
hidden_states = layer(hidden_states, attention_mask)
return hidden_states
|
class SeamlessM4Tv2ConformerAdapter(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask):
pass
| 3
| 0
| 7
| 2
| 5
| 1
| 2
| 0.1
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 15
| 4
| 10
| 6
| 7
| 1
| 8
| 5
| 5
| 2
| 1
| 1
| 3
|
5,155
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerAdapterLayer
|
from torch import Tensor, nn
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
import torch
from typing import Optional, Union
class SeamlessM4Tv2ConformerAdapterLayer(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
dropout = config.adaptor_dropout
self.kernel_size = config.adaptor_kernel_size
self.stride = config.adaptor_stride
self.residual_layer_norm = nn.LayerNorm(embed_dim)
self.residual_conv = nn.Conv1d(embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2)
self.activation = nn.GLU(dim=1)
self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
self.self_attn_conv = nn.Conv1d(embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2)
self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config, use_position_embeddings=False)
self.self_attn_dropout = nn.Dropout(dropout)
self.ffn_layer_norm = nn.LayerNorm(embed_dim)
self.ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn='relu', dropout=dropout)
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pad = self.kernel_size // 2
seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
seq_lens = (seq_lens + 2 * pad - self.kernel_size) / self.stride + 1
return seq_lens.floor()
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False):
residual = self.residual_layer_norm(hidden_states)
residual = residual.transpose(1, 2)
residual = self.residual_conv(residual)
residual = self.activation(residual)
residual = residual.transpose(1, 2)
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.self_attn_conv(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(hidden_states.device)
attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths)
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
hidden_states, attn_weights = self.self_attn(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.self_attn_dropout(hidden_states)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states) + residual
return hidden_states
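The helper `_compute_sub_sample_lengths_from_attention_mask` above applies the standard Conv1d output-length rule to track valid lengths through the strided adaptor convolutions; a quick numeric check under the default adaptor settings (`adaptor_kernel_size=8`, `adaptor_stride=8`, hence `pad=4`):
```python
# Conv1d output length: floor((L + 2*pad - kernel_size) / stride) + 1
kernel_size, stride = 8, 8
pad = kernel_size // 2

for seq_len in (100, 160, 1000):
    out_len = (seq_len + 2 * pad - kernel_size) // stride + 1
    print(seq_len, "->", out_len)  # 100 -> 13, 160 -> 21, 1000 -> 126
```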
|
class SeamlessM4Tv2ConformerAdapterLayer(nn.Module):
def __init__(self, config):
pass
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pass
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False):
pass
| 4
| 0
| 31
| 4
| 23
| 4
| 1
| 0.17
| 1
| 5
| 2
| 0
| 3
| 11
| 3
| 13
| 96
| 15
| 69
| 27
| 60
| 12
| 43
| 22
| 39
| 2
| 1
| 1
| 4
|
5,156
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerConvolutionModule
|
from torch import Tensor, nn
from ...activations import ACT2FN
import torch
class SeamlessM4Tv2ConformerConvolutionModule(nn.Module):
"""Convolution block used in the conformer block. Uses a causal depthwise convolution similar to that
described in Section 2.1 of https://huggingface.co/papers/1609.03499
"""
def __init__(self, config):
super().__init__()
if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.pointwise_conv1 = nn.Conv1d(config.hidden_size, 2 * config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False)
self.glu = nn.GLU(dim=1)
self.depthwise_conv = nn.Conv1d(config.hidden_size, config.hidden_size, config.conv_depthwise_kernel_size, stride=1, padding=0, groups=config.hidden_size, bias=False)
self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size)
self.activation = ACT2FN[config.speech_encoder_hidden_act]
self.pointwise_conv2 = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False)
self.dropout = nn.Dropout(config.speech_encoder_dropout)
def forward(self, hidden_states, attention_mask=None):
hidden_states = self.layer_norm(hidden_states)
if attention_mask is not None:
hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.pointwise_conv1(hidden_states)
hidden_states = self.glu(hidden_states)
hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0))
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
hidden_states = self.activation(hidden_states)
hidden_states = self.pointwise_conv2(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
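The depthwise convolution above is made causal by left-padding with `kernel_size - 1` zeros rather than using symmetric padding; a minimal standalone sketch of that step (torch only, toy shapes):
```python
import torch
from torch import nn

channels, kernel_size = 4, 31
x = torch.randn(1, channels, 50)  # (batch, channels, time)

depthwise = nn.Conv1d(channels, channels, kernel_size, groups=channels, padding=0)

# left-pad only: output length equals input length, and position t never
# depends on inputs later than t
padded = nn.functional.pad(x, (kernel_size - 1, 0))
print(depthwise(padded).shape)  # torch.Size([1, 4, 50])
```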
|
class SeamlessM4Tv2ConformerConvolutionModule(nn.Module):
'''Convolution block used in the conformer block. Uses a causal depthwise convolution similar to that
described in Section 2.1 of https://huggingface.co/papers/1609.03499
'''
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None):
pass
| 3
| 1
| 32
| 3
| 25
| 4
| 2
| 0.2
| 1
| 2
| 0
| 0
| 2
| 8
| 2
| 12
| 68
| 8
| 50
| 11
| 47
| 10
| 28
| 11
| 25
| 2
| 1
| 1
| 4
|
5,157
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerEncoder
|
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
import torch
from ...integrations.fsdp import is_fsdp_managed_module
class SeamlessM4Tv2ConformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dropout = nn.Dropout(config.speech_encoder_dropout)
self.layers = nn.ModuleList([SeamlessM4Tv2ConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)])
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
def _apply_chunk_attention(self, attention_mask, hidden_states):
"""
Creates a chunk attention mask. It creates a mask to prevent attention across chunks, ensuring that each
position attends only to positions within its own chunk. If a left chunk overlap is specified
(`speech_encoder_left_chunk_num` in the configuration), the attention mask is adjusted accordingly so that each
position can also attend to the `speech_encoder_left_chunk_num` previous chunks.
"""
sequence_len = hidden_states.shape[1]
chunk_indices = torch.arange(sequence_len, device=hidden_states.device)
chunk_indices = torch.div(chunk_indices, self.config.speech_encoder_chunk_size).long()
start_indices = torch.full_like(chunk_indices, 0)
if self.config.speech_encoder_left_chunk_num >= 0:
start_indices = (chunk_indices - self.config.speech_encoder_left_chunk_num).clamp_(min=0)
start_indices = start_indices * self.config.speech_encoder_chunk_size
start_indices = start_indices
start_indices = start_indices.unsqueeze(1).expand(-1, sequence_len)
end_indices = ((chunk_indices + 1) * self.config.speech_encoder_chunk_size).clamp_(max=sequence_len)
end_indices = end_indices.unsqueeze(1).expand(-1, sequence_len)
indices = torch.arange(sequence_len, device=hidden_states.device).unsqueeze(0).expand(sequence_len, -1)
chunk_mask = (indices < start_indices) | (indices >= end_indices)
chunk_mask = chunk_mask.unsqueeze(0).unsqueeze(0)
attention_mask = chunk_mask if attention_mask is None else attention_mask.bool() | chunk_mask
attention_mask = attention_mask.to(dtype=hidden_states.dtype)
return attention_mask
def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
conv_attention_mask = attention_mask
if attention_mask is not None:
hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
if self.config.speech_encoder_chunk_size is not None:
attention_mask = self._apply_chunk_attention(attention_mask, hidden_states)
if attention_mask is not None:
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.speech_encoder_layerdrop
if not skip_the_layer or synced_gpus:
layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
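A compact sketch of the chunk-mask construction in `_apply_chunk_attention`, using a toy sequence length and chunk size so the blocked structure is easy to inspect (standalone, torch only):
```python
import torch

sequence_len, chunk_size, left_chunk_num = 8, 3, 1

chunk_indices = torch.div(torch.arange(sequence_len), chunk_size).long()
start = ((chunk_indices - left_chunk_num).clamp(min=0) * chunk_size).unsqueeze(1)
end = ((chunk_indices + 1) * chunk_size).clamp(max=sequence_len).unsqueeze(1)

indices = torch.arange(sequence_len).unsqueeze(0).expand(sequence_len, -1)
chunk_mask = (indices < start) | (indices >= end)  # True = masked out

# row i shows which key positions query position i may attend to
print((~chunk_mask).int())
```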
|
class SeamlessM4Tv2ConformerEncoder(nn.Module):
def __init__(self, config):
pass
def _apply_chunk_attention(self, attention_mask, hidden_states):
'''
Creates a chunk attention mask. It creates a mask to prevent attention across chunks, ensuring that each
position attends only to positions within its own chunk. If a left chunk overlap is specified
(`speech_encoder_left_chunk_num` in the configuration), the attention mask is adjusted accordingly so that each
position can also attend to the `speech_encoder_left_chunk_num` previous chunks.
'''
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
pass
| 4
| 1
| 40
| 7
| 29
| 3
| 6
| 0.11
| 1
| 6
| 2
| 0
| 3
| 5
| 3
| 13
| 123
| 24
| 89
| 31
| 78
| 10
| 60
| 23
| 56
| 15
| 1
| 3
| 19
|
5,158
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerEncoderLayer
|
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
class SeamlessM4Tv2ConformerEncoderLayer(GradientCheckpointingLayer):
"""Conformer block based on https://huggingface.co/papers/2005.08100."""
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
dropout = config.speech_encoder_dropout
self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
self.ffn1 = SeamlessM4Tv2ConformerFeedForward(config)
self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
self.self_attn_dropout = nn.Dropout(dropout)
self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config)
self.conv_module = SeamlessM4Tv2ConformerConvolutionModule(config)
self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
self.ffn2 = SeamlessM4Tv2ConformerFeedForward(config)
self.final_layer_norm = nn.LayerNorm(embed_dim)
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, conv_attention_mask: Optional[torch.Tensor]=None):
hidden_states = hidden_states
residual = hidden_states
hidden_states = self.ffn1_layer_norm(hidden_states)
hidden_states = self.ffn1(hidden_states)
hidden_states = hidden_states * 0.5 + residual
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.self_attn_dropout(hidden_states)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn2_layer_norm(hidden_states)
hidden_states = self.ffn2(hidden_states)
hidden_states = hidden_states * 0.5 + residual
hidden_states = self.final_layer_norm(hidden_states)
return (hidden_states, attn_weights)
|
class SeamlessM4Tv2ConformerEncoderLayer(GradientCheckpointingLayer):
'''Conformer block based on https://huggingface.co/papers/2005.08100.'''
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, conv_attention_mask: Optional[torch.Tensor]=None):
pass
| 3
| 1
| 30
| 5
| 22
| 4
| 1
| 0.23
| 1
| 6
| 3
| 0
| 2
| 9
| 2
| 12
| 65
| 11
| 44
| 22
| 35
| 10
| 34
| 16
| 31
| 1
| 1
| 0
| 2
|
5,159
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerFeatureProjection
|
from torch import Tensor, nn
class SeamlessM4Tv2ConformerFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps)
self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size)
self.dropout = nn.Dropout(config.speech_encoder_dropout)
def forward(self, hidden_states):
norm_hidden_states = self.layer_norm(hidden_states.to(self.layer_norm.weight.dtype))
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class SeamlessM4Tv2ConformerFeatureProjection(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.18
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 14
| 1
| 11
| 7
| 8
| 2
| 11
| 7
| 8
| 1
| 1
| 0
| 2
|
5,160
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerFeedForward
|
from torch import Tensor, nn
from ...activations import ACT2FN
class SeamlessM4Tv2ConformerFeedForward(nn.Module):
def __init__(self, config, act_fn=None, dropout=None):
super().__init__()
dropout = dropout if dropout is not None else config.speech_encoder_dropout
act_fn = act_fn if act_fn is not None else config.speech_encoder_hidden_act
self.intermediate_dropout = nn.Dropout(dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size)
self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
|
class SeamlessM4Tv2ConformerFeedForward(nn.Module):
def __init__(self, config, act_fn=None, dropout=None):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 10
| 2
| 8
| 0
| 3
| 0
| 1
| 2
| 0
| 0
| 2
| 5
| 2
| 12
| 21
| 4
| 17
| 8
| 14
| 0
| 17
| 8
| 14
| 4
| 1
| 0
| 5
|
5,161
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ConformerSelfAttention
|
from torch import Tensor, nn
import math
import torch
from typing import Optional, Union
class SeamlessM4Tv2ConformerSelfAttention(nn.Module):
"""Construct a SeamlessM4Tv2ConformerSelfAttention object.
Can be enhanced with relative position embeddings.
"""
def __init__(self, config, use_position_embeddings=True):
super().__init__()
self.head_size = config.hidden_size // config.speech_encoder_attention_heads
self.num_heads = config.speech_encoder_attention_heads
self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None
self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(p=config.speech_encoder_dropout)
if self.position_embeddings_type == 'relative_key':
self.left_max_position_embeddings = config.left_max_position_embeddings
self.right_max_position_embeddings = config.right_max_position_embeddings
num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1
self.distance_embedding = nn.Embedding(num_positions, self.head_size)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
batch_size, sequence_length, hidden_size = hidden_states.size()
query_key_states = hidden_states
value_states = hidden_states
query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attn_weights = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
if self.position_embeddings_type == 'relative_key':
query_length, key_length = (query.shape[2], key.shape[2])
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_r - position_ids_l
distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings)
positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings)
positional_embedding = positional_embedding.to(dtype=query.dtype)
relative_position_attn_weights = torch.einsum('bhld,lrd->bhlr', query, positional_embedding)
attn_weights = attn_weights + relative_position_attn_weights / math.sqrt(self.head_size)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = torch.softmax(attn_weights, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
attn_output = self.linear_out(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
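The relative-key branch above clamps pairwise query/key distances before looking them up in `distance_embedding`; a small sketch of just that distance computation (toy lengths, torch only):
```python
import torch

query_length, key_length = 5, 5
left_max, right_max = 2, 1  # play the role of left/right_max_position_embeddings

position_ids_l = torch.arange(query_length).view(-1, 1)
position_ids_r = torch.arange(key_length).view(1, -1)

# clamp distances to [-left_max, right_max], then shift into [0, left_max + right_max]
distance = torch.clamp(position_ids_r - position_ids_l, -left_max, right_max)
print(distance + left_max)  # valid indices into an embedding table of size left_max + right_max + 1
```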
|
class SeamlessM4Tv2ConformerSelfAttention(nn.Module):
'''Construct a SeamlessM4Tv2ConformerSelfAttention object.
Can be enhanced with relative position embeddings.
'''
def __init__(self, config, use_position_embeddings=True):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 3
| 1
| 39
| 9
| 26
| 5
| 4
| 0.23
| 1
| 3
| 0
| 0
| 2
| 11
| 2
| 12
| 83
| 20
| 52
| 34
| 44
| 12
| 47
| 29
| 44
| 4
| 1
| 1
| 7
|
5,162
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2Decoder
|
import math
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring(custom_intro='\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].\n ')
class SeamlessM4Tv2Decoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens: Optional[nn.Embedding]=None):
"""
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
"""
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = SeamlessM4Tv2ScaledWordEmbedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx, embed_scale=embed_scale)
self.embed_tokens.weight = embed_tokens.weight
else:
self.embed_tokens = SeamlessM4Tv2ScaledWordEmbedding(self.vocab_size, config.hidden_size, self.padding_idx, embed_scale=embed_scale)
self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(self.max_target_positions, config.hidden_size, padding_idx=self.padding_idx)
layers = []
for i in range(config.decoder_layers):
layers.append(SeamlessM4Tv2DecoderLayer(config, decoder_attention_heads=config.decoder_attention_heads, decoder_ffn_dim=config.decoder_ffn_dim, layer_idx=i))
self.layers = nn.ModuleList(layers)
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_shape = input.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
positions = self.embed_positions(input, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
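As a rough usage sketch of the decoder in isolation (not an official entry point: it imports an internal class directly and builds a deliberately tiny, hypothetical configuration), one can feed it token ids plus precomputed encoder states and read back the pre-logits hidden states:
import torch
from transformers import SeamlessM4Tv2Config
from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 import SeamlessM4Tv2Decoder

# Tiny, made-up dimensions; real checkpoints are far larger.
config = SeamlessM4Tv2Config(
    vocab_size=100, hidden_size=32, decoder_layers=2,
    decoder_attention_heads=4, decoder_ffn_dim=64, max_position_embeddings=64,
)
decoder = SeamlessM4Tv2Decoder(config).eval()

input_ids = torch.randint(0, 100, (1, 7))
encoder_hidden_states = torch.randn(1, 11, 32)  # stand-in for encoder output
with torch.no_grad():
    out = decoder(
        input_ids=input_ids,
        encoder_hidden_states=encoder_hidden_states,
        use_cache=False,  # skip the incremental-decoding cache path in this sketch
    )
print(out.last_hidden_state.shape)  # torch.Size([1, 7, 32])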
|
@auto_docstring(custom_intro='\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].\n ')
class SeamlessM4Tv2Decoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens: Optional[nn.Embedding]=None):
'''
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
pass
| 5
| 1
| 59
| 8
| 37
| 14
| 9
| 0.36
| 1
| 12
| 5
| 0
| 4
| 10
| 4
| 10
| 238
| 34
| 150
| 47
| 129
| 54
| 80
| 31
| 75
| 29
| 2
| 3
| 35
|
5,163
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2DecoderLayer
|
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...activations import ACT2FN
from ...utils.deprecation import deprecate_kwarg
class SeamlessM4Tv2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None, layer_idx=None):
super().__init__()
decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
decoder_attention_heads = config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4Tv2Attention(embed_dim=self.embed_dim, num_heads=decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.cross_attention = SeamlessM4Tv2Attention(self.embed_dim, decoder_attention_heads, config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
self.ffn = SeamlessM4Tv2FeedForwardNetwork(config, ffn_dim=decoder_ffn_dim)
self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
self.ffn_dropout = nn.Dropout(config.activation_dropout)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`):
encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
very large negative values.
past_key_values (`Cache`):
cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.cross_attention_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.cross_attention(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, attention_mask=encoder_attention_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = self.ffn_dropout(hidden_states)
hidden_states = residual + hidden_states
return (hidden_states, self_attn_weights, cross_attn_weights)
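A single decoder layer can be exercised the same way; when `encoder_hidden_states` is provided it runs self-attention, cross-attention, and the feed-forward block, and returns a 3-tuple whose last two entries are the (optional) attention weights. A hedged sketch, again with made-up dimensions and a direct import of the internal class:
import torch
from transformers import SeamlessM4Tv2Config
from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 import SeamlessM4Tv2DecoderLayer

config = SeamlessM4Tv2Config(hidden_size=32, decoder_attention_heads=4, decoder_ffn_dim=64)
layer = SeamlessM4Tv2DecoderLayer(config).eval()

hidden_states = torch.randn(1, 6, 32)
encoder_hidden_states = torch.randn(1, 11, 32)
with torch.no_grad():
    hidden_states, self_attn, cross_attn = layer(
        hidden_states, encoder_hidden_states=encoder_hidden_states, output_attentions=True
    )
print(hidden_states.shape)            # torch.Size([1, 6, 32])
print(self_attn.shape, cross_attn.shape)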
|
class SeamlessM4Tv2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`):
encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
very large negative values.
past_key_values (`Cache`):
cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 4
| 1
| 56
| 8
| 35
| 13
| 4
| 0.35
| 1
| 6
| 3
| 0
| 2
| 11
| 2
| 12
| 113
| 17
| 71
| 30
| 59
| 25
| 42
| 21
| 39
| 5
| 1
| 1
| 8
|
5,164
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2Encoder
|
import math
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring(custom_intro='\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`SeamlessM4Tv2EncoderLayer`].\n ')
class SeamlessM4Tv2Encoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens: Optional[nn.Embedding]=None, is_t2u_encoder: bool=False):
"""
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
is_t2u_encoder (`bool`, *optional*, defaults to `False`):
indicates if it belongs to the text-to-units model, in which case it won't have input embeddings
"""
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
embed_dim = config.hidden_size
self.is_t2u_encoder = is_t2u_encoder
self.max_source_positions = config.max_position_embeddings
if not self.is_t2u_encoder:
embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = SeamlessM4Tv2ScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale)
if embed_tokens is not None:
self.embed_tokens.weight = embed_tokens.weight
self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(self.max_source_positions, embed_dim, self.padding_idx)
layers = []
for _ in range(config.encoder_layers):
layers.append(SeamlessM4Tv2EncoderLayer(config, encoder_attention_heads=config.encoder_attention_heads, encoder_ffn_dim=config.encoder_ffn_dim))
self.layers = nn.ModuleList(layers)
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
self.post_init()
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutput]:
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and self.is_t2u_encoder:
raise ValueError('You cannot pass input_ids to the encoder of the text_to_units model. Pass inputs_embeds instead.')
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if not self.is_t2u_encoder:
embed_pos = self.embed_positions(input)
hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device)
else:
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
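An analogous sketch for the encoder (same caveats: internal import, tiny hypothetical dimensions). The regular text encoder embeds `input_ids` and adds sinusoidal positions; the `is_t2u_encoder=True` variant skips both and expects `inputs_embeds` directly:
import torch
from transformers import SeamlessM4Tv2Config
from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 import SeamlessM4Tv2Encoder

config = SeamlessM4Tv2Config(
    vocab_size=100, hidden_size=32, encoder_layers=2,
    encoder_attention_heads=4, encoder_ffn_dim=64, max_position_embeddings=64,
)
encoder = SeamlessM4Tv2Encoder(config).eval()

input_ids = torch.randint(0, 100, (1, 9))
attention_mask = torch.ones(1, 9, dtype=torch.long)
with torch.no_grad():
    out = encoder(input_ids=input_ids, attention_mask=attention_mask)
print(out.last_hidden_state.shape)  # torch.Size([1, 9, 32])

# The text-to-units variant rejects input_ids and consumes embeddings only:
t2u_encoder = SeamlessM4Tv2Encoder(config, is_t2u_encoder=True).eval()
with torch.no_grad():
    t2u_out = t2u_encoder(inputs_embeds=torch.randn(1, 9, 32))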
|
@auto_docstring(custom_intro='\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`SeamlessM4Tv2EncoderLayer`].\n ')
class SeamlessM4Tv2Encoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens: Optional[nn.Embedding]=None, is_t2u_encoder: bool=False):
'''
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
is_t2u_encoder (`bool`, *optional*, defaults to `False`):
indicates if it belongs to the text-to-units model, in which case it won't have input embeddings
'''
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutput]:
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 4
| 2
| 87
| 15
| 57
| 16
| 14
| 0.28
| 1
| 12
| 5
| 0
| 2
| 10
| 2
| 8
| 176
| 30
| 115
| 40
| 98
| 32
| 68
| 26
| 65
| 22
| 2
| 3
| 27
|
5,165
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2EncoderLayer
|
from torch import Tensor, nn
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from ...modeling_layers import GradientCheckpointingLayer
import torch
class SeamlessM4Tv2EncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, encoder_ffn_dim=None, encoder_attention_heads=None):
super().__init__()
encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim
encoder_attention_heads = config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4Tv2Attention(embed_dim=self.embed_dim, num_heads=encoder_attention_heads, dropout=config.attention_dropout)
self.attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.ffn = SeamlessM4Tv2FeedForwardNetwork(config, ffn_dim=encoder_ffn_dim)
self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
self.ffn_dropout = nn.Dropout(config.activation_dropout)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = self.ffn_dropout(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
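Note that the encoder layer's return value is a tuple whose length depends on `output_attentions`: one element by default, two when attention weights are requested. A small sketch under the same hypothetical-config assumption as the decoder examples above:
import torch
from transformers import SeamlessM4Tv2Config
from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 import SeamlessM4Tv2EncoderLayer

config = SeamlessM4Tv2Config(hidden_size=32, encoder_attention_heads=4, encoder_ffn_dim=64)
layer = SeamlessM4Tv2EncoderLayer(config).eval()

hidden_states = torch.randn(1, 6, 32)
with torch.no_grad():
    (hidden_only,) = layer(hidden_states, attention_mask=None)
    hidden_out, attn_weights = layer(hidden_states, attention_mask=None, output_attentions=True)
print(hidden_only.shape, attn_weights.shape)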
|
class SeamlessM4Tv2EncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, encoder_ffn_dim=None, encoder_attention_heads=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
'''
pass
| 3
| 1
| 30
| 5
| 21
| 4
| 3
| 0.19
| 1
| 6
| 3
| 0
| 2
| 7
| 2
| 12
| 61
| 11
| 42
| 18
| 34
| 8
| 27
| 13
| 24
| 3
| 1
| 1
| 5
|
5,166
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2FeedForwardNetwork
|
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
import torch
from torch import Tensor, nn
from ...activations import ACT2FN
class SeamlessM4Tv2FeedForwardNetwork(nn.Module):
def __init__(self, config: SeamlessM4Tv2Config, ffn_dim: int):
super().__init__()
self.fc1 = nn.Linear(config.hidden_size, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, config.hidden_size)
self.dropout = nn.Dropout(config.activation_dropout)
self.act = ACT2FN[config.activation_function]
def forward(self, hidden_states):
hidden_states = self.fc1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if isinstance(self.fc2.weight, torch.Tensor) and hidden_states.dtype != self.fc2.weight.dtype and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8):
hidden_states = hidden_states.to(self.fc2.weight.dtype)
hidden_states = self.fc2(hidden_states)
return hidden_states
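The only non-obvious part of this feed-forward block is the dtype guard before `fc2`: if the activations have drifted to a different floating-point dtype than the second projection's weights (e.g. under mixed precision), they are cast back, while int8/uint8 quantized weights are left alone. The check in isolation, with made-up shapes:
import torch
from torch import nn

fc2 = nn.Linear(64, 32)                                        # float32 weights
hidden_states = torch.randn(2, 5, 64, dtype=torch.bfloat16)    # activations in a lower precision

if (
    isinstance(fc2.weight, torch.Tensor)
    and hidden_states.dtype != fc2.weight.dtype
    and fc2.weight.dtype not in (torch.int8, torch.uint8)
):
    hidden_states = hidden_states.to(fc2.weight.dtype)         # cast back before the matmul

print(fc2(hidden_states).dtype)  # torch.float32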
|
class SeamlessM4Tv2FeedForwardNetwork(nn.Module):
def __init__(self, config: SeamlessM4Tv2Config, ffn_dim: int):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 9
| 0
| 9
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 4
| 2
| 12
| 20
| 1
| 19
| 7
| 16
| 0
| 15
| 7
| 12
| 2
| 1
| 1
| 3
|
5,167
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ForSpeechToSpeech
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The speech-to-speech SeamlessM4Tv2 Model transformer which can be used for S2ST.\n ')
class SeamlessM4Tv2ForSpeechToSpeech(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['text_encoder']
main_input_name = 'input_features'
_tied_weights_keys = ['lm_head.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
def get_encoder(self):
return self.speech_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_decoder.embed_tokens = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
def forward(self, input_features: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
logger.warning("This is the same forward method as `SeamlessM4Tv2ForSpeechToText`. It doesn't use `self.t2u_model`.If you want to generate speech, use the `generate` method.")
encoder_outputs = self.speech_encoder(input_features=input_features, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_outputs[0].device)
encoder_attention_mask = _compute_new_attention_mask(hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths)
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@torch.no_grad()
def generate(self, input_features: Optional[torch.Tensor]=None, return_intermediate_token_ids: Optional[bool]=None, tgt_lang: Optional[str]=None, speaker_id: Optional[int]=0, **kwargs) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
"""
Generates translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_features, num_beams=4, speech_do_sample=True)` will successively perform
beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
speaker_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
text model or the speech model, respectively. Prefixed keywords take priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4Tv2GenerationOutput, tuple[Tensor]]`:
- If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
- If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
sequence_length)` and `waveform_lengths` which gives the length of each sample.
"""
batch_size = len(input_features) if input_features is not None else len(kwargs.get('inputs_embeds'))
if tgt_lang is None:
raise ValueError('You must specify a `tgt_lang` to generate translated speech.')
else:
tgt_lang = tgt_lang.replace('__', '')
for key in ['text_decoder_lang_to_code_id', 't2u_lang_code_to_id', 'vocoder_lang_code_to_id']:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(f"This model generation config doesn't have a `{key}` key which maps the target language\n to the right token id. Make sure to load the right generation config.")
elif tgt_lang not in lang_code_to_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model.\n Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports\n more languages for text translation than for speech synthesis.")
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text['output_hidden_states'] = True
kwargs_text['return_dict_in_generate'] = True
kwargs_text['output_scores'] = True
text_decoder_input_ids = kwargs_text.get('decoder_input_ids')
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text['decoder_input_ids'] = text_decoder_input_ids
text_generation_output = super().generate(input_features, **kwargs_text)
sequences = text_generation_output.sequences
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get('attention_mask', kwargs_text.get('attention_mask', None))
encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask)[0]
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_hidden_states.device)
attention_mask = _compute_new_attention_mask(hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths)
attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
t2u_input_embeds = self.text_decoder(input_ids=sequences[:, :-1], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech['attention_mask'] = t2u_model_attention_mask
t2u_input_ids = sequences[:, 2:-1]
t2u_input_ids = torch.masked_fill(t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id)
t2u_subwords = self._indices_to_subwords(t2u_input_ids)
t2u_char_count_per_id = self._count_character_length_in_subword(t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id)
pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
t2u_char_input_ids = self._get_char_input_ids(t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id)
t2u_output = self.t2u_model(inputs_embeds=t2u_input_embeds, char_input_ids=t2u_char_input_ids, char_count_per_id=t2u_char_count_per_id, **kwargs_speech)
t2u_logits = t2u_output[0]
padding_mask = t2u_output[1].bool()
temperature = kwargs_speech.get('temperature', None)
if (temperature is None or temperature == 1.0) or not kwargs_speech.get('do_sample', False):
unit_ids = t2u_logits.argmax(dim=-1)
else:
t2u_logits = t2u_logits / temperature
probs = nn.functional.softmax(t2u_logits, dim=-1)
probs = probs.reshape((-1, probs.shape[2]))
unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
output_unit_ids = unit_ids.detach().clone()
replace_mask = (unit_ids == self.config.t2u_eos_token_id) | ~padding_mask
unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
unit_ids = torch.where(unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
speaker_id = torch.tensor([[speaker_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4Tv2GenerationOutput(waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids)
return (waveform, waveform_lengths)
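A hedged end-to-end usage sketch of the speech-to-speech path, assuming the public `facebook/seamless-m4t-v2-large` checkpoint and a 16 kHz mono waveform (random noise stands in for real audio here). Un-prefixed generation kwargs go to both sub-generations, while `text_*`/`speech_*` prefixes target only one of them:
import torch
from transformers import AutoProcessor, SeamlessM4Tv2ForSpeechToSpeech

checkpoint = "facebook/seamless-m4t-v2-large"
processor = AutoProcessor.from_pretrained(checkpoint)
model = SeamlessM4Tv2ForSpeechToSpeech.from_pretrained(checkpoint)

audio = torch.randn(16000)  # stand-in for one second of 16 kHz audio
inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")

waveform, waveform_lengths = model.generate(
    **inputs,
    tgt_lang="fra",          # target language code
    speaker_id=0,            # vocoder speaker
    text_num_beams=4,        # only the text sub-generation uses beam search
    speech_do_sample=True,   # only the unit sub-generation samples
)
print(waveform.shape, waveform_lengths)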
| null
| 12
| 2
| 31
| 5
| 21
| 6
| 3
| 0.31
| 1
| 14
| 7
| 0
| 10
| 6
| 11
| 17
| 374
| 62
| 239
| 88
| 201
| 74
| 126
| 62
| 114
| 16
| 2
| 3
| 36
|
5,168
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ForSpeechToText
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The speech-to-text SeamlessM4Tv2 Model transformer which can be used for S2TT.\n ')
class SeamlessM4Tv2ForSpeechToText(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['text_encoder', 't2u_model', 'vocoder']
main_input_name = 'input_features'
_tied_weights_keys = ['lm_head.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.speech_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_decoder.embed_tokens = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
def forward(self, input_features: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.speech_encoder(input_features=input_features, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_outputs[0].device)
encoder_attention_mask = _compute_new_attention_mask(hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths)
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
def generate(self, input_features=None, tgt_lang=None, generation_config=None, logits_processor=None, stopping_criteria=None, prefix_allowed_tokens_fn=None, synced_gpus=False, **kwargs):
"""
Generates sequences of token ids.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which had the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constraints the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
text_decoder_input_ids = kwargs.pop('decoder_input_ids', None)
input_features = input_features if input_features is not None else kwargs.pop('inputs')
if tgt_lang is not None:
inputs = kwargs.get('input_embeds') if input_features is None else input_features
inputs = inputs if inputs is not None else kwargs.get('encoder_outputs', {'last_hidden_state': None})['last_hidden_state']
batch_size = len(inputs)
if hasattr(self.generation_config, 'text_decoder_lang_to_code_id'):
tgt_lang = tgt_lang.replace('__', '')
if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in\n {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}")
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
else:
raise ValueError("This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps\n the target language to the right token id. Make sure to load the right generation config.")
else:
logger.warning('You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get\n a correct generation, otherwise the generation will probably make no sense.')
return super().generate(input_features, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, decoder_input_ids=text_decoder_input_ids, **kwargs)
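For the speech-to-text counterpart, generation only needs a `tgt_lang`; the output is a batch of token ids that the processor's tokenizer can decode. A sketch under the same checkpoint assumption as above:
import torch
from transformers import AutoProcessor, SeamlessM4Tv2ForSpeechToText

checkpoint = "facebook/seamless-m4t-v2-large"
processor = AutoProcessor.from_pretrained(checkpoint)
model = SeamlessM4Tv2ForSpeechToText.from_pretrained(checkpoint)

audio = torch.randn(16000)  # stand-in for one second of 16 kHz audio
inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")

generated_ids = model.generate(**inputs, tgt_lang="fra")
print(processor.decode(generated_ids[0].tolist(), skip_special_tokens=True))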
| null
| 11
| 2
| 22
| 2
| 15
| 5
| 3
| 0.39
| 1
| 10
| 5
| 0
| 10
| 4
| 11
| 17
| 275
| 30
| 176
| 61
| 136
| 69
| 78
| 33
| 66
| 16
| 2
| 3
| 34
|
5,169
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ForTextToSpeech
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The text-to-speech SeamlessM4Tv2 Model transformer which can be used for T2ST.\n ')
class SeamlessM4Tv2ForTextToSpeech(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['speech_encoder']
main_input_name = 'input_ids'
_tied_weights_keys = ['lm_head.weight', 'text_encoder.embed_tokens.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared)
self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
def get_encoder(self):
return self.text_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
logger.warning("This is the same forward method as `SeamlessM4Tv2ForTextToText`.It doesn't use the text-to-unit model `SeamlessM4Tv2TextToUnitForConditionalGeneration`.If you want to generate speech, use the `.generate` method.")
encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@torch.no_grad()
def generate(self, input_ids: Optional[torch.Tensor]=None, return_intermediate_token_ids: Optional[bool]=None, tgt_lang: Optional[str]=None, speaker_id: Optional[int]=0, **kwargs) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
"""
Generates translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform
beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
speaker_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4Tv2GenerationOutput, tuple[Tensor]]`:
- If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
- If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
sequence_length)` and `waveform_lengths` which gives the length of each sample.
"""
batch_size = len(input_ids) if input_ids is not None else len(kwargs.get('inputs_embeds'))
if tgt_lang is None:
raise ValueError('You must specify a `tgt_lang` to generate translated speech.')
else:
tgt_lang = tgt_lang.replace('__', '')
for key in ['text_decoder_lang_to_code_id', 't2u_lang_code_to_id', 'vocoder_lang_code_to_id']:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(f"This model generation config doesn't have a `{key}` key which maps the target language\n to the right token id. Make sure to load the right generation config.")
elif tgt_lang not in lang_code_to_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model.\n Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports\n more languages for text translation than for speech synthesis.")
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text['output_hidden_states'] = True
kwargs_text['return_dict_in_generate'] = True
kwargs_text['output_scores'] = True
text_decoder_input_ids = kwargs_text.get('decoder_input_ids')
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text['decoder_input_ids'] = text_decoder_input_ids
text_generation_output = super().generate(input_ids, **kwargs_text)
sequences = text_generation_output.sequences
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get('attention_mask', kwargs_text.get('attention_mask', None))
if attention_mask is not None:
attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
t2u_input_embeds = self.text_decoder(input_ids=sequences[:, :-1], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech['attention_mask'] = t2u_model_attention_mask
t2u_input_ids = sequences[:, 2:-1]
t2u_input_ids = torch.masked_fill(t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id)
t2u_subwords = self._indices_to_subwords(t2u_input_ids)
t2u_char_count_per_id = self._count_character_length_in_subword(t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id)
pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
t2u_char_input_ids = self._get_char_input_ids(t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id)
t2u_output = self.t2u_model(inputs_embeds=t2u_input_embeds, char_input_ids=t2u_char_input_ids, char_count_per_id=t2u_char_count_per_id, **kwargs_speech)
t2u_logits = t2u_output[0]
padding_mask = t2u_output[1].bool()
temperature = kwargs_speech.get('temperature', None)
if (temperature is None or temperature == 1.0) or not kwargs_speech.get('do_sample', False):
unit_ids = t2u_logits.argmax(dim=-1)
else:
t2u_logits = t2u_logits / temperature
probs = nn.functional.softmax(t2u_logits, dim=-1)
probs = probs.reshape((-1, probs.shape[2]))
unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
output_unit_ids = unit_ids.detach().clone()
replace_mask = (unit_ids == self.config.t2u_eos_token_id) | ~padding_mask
unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
unit_ids = torch.where(unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
speaker_id = torch.tensor([[speaker_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4Tv2GenerationOutput(waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids)
return (waveform, waveform_lengths)
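# --- Hedged usage sketch (added, not part of the source file) ---
# The `generate` above takes text input ids and returns waveforms, which matches the
# text-to-speech variant; treating it as `SeamlessM4Tv2ForTextToSpeech`, the AutoProcessor
# usage and the checkpoint name below are assumptions for illustration only. It also shows
# the `text_` / `speech_` keyword prefixing described in the docstring.
from transformers import AutoProcessor, SeamlessM4Tv2ForTextToSpeech

processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
tts_model = SeamlessM4Tv2ForTextToSpeech.from_pretrained("facebook/seamless-m4t-v2-large")
inputs = processor(text="Hello world", src_lang="eng", return_tensors="pt")
out = tts_model.generate(
    **inputs,
    tgt_lang="fra",
    return_intermediate_token_ids=True,
    text_num_beams=4,        # applies only to the text decoding pass
    speech_do_sample=True,   # applies only to the text-to-unit pass
)
# out.waveform, out.waveform_lengths, out.sequences and out.unit_sequences are populated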
| null | 12 | 2 | 30 | 5 | 20 | 6 | 3 | 0.32 | 1 | 15 | 8 | 0 | 10 | 6 | 11 | 17 | 365 | 62 | 230 | 85 | 193 | 74 | 124 | 60 | 112 | 15 | 2 | 3 | 35 |
5,170 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ForTextToText |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The text-to-text SeamlessM4Tv2 Model transformer which can be used for T2TT.\n ')
class SeamlessM4Tv2ForTextToText(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['speech_encoder', 't2u_model', 'vocoder']
main_input_name = 'input_ids'
_tied_weights_keys = ['lm_head.weight', 'text_encoder.embed_tokens.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared)
self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.text_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
def generate(self, input_ids=None, tgt_lang=None, generation_config=None, logits_processor=None, stopping_criteria=None, prefix_allowed_tokens_fn=None, synced_gpus=False, **kwargs):
"""
Generates sequences of token ids.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.Tensor` of varying shape depending on the modality, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which had the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
text_decoder_input_ids = kwargs.pop('decoder_input_ids', None)
if tgt_lang is not None:
batch_size = len(input_ids) if input_ids is not None else len(kwargs.get('inputs_embeds'))
if hasattr(self.generation_config, 'text_decoder_lang_to_code_id'):
tgt_lang = tgt_lang.replace('__', '')
if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in\n {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}")
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
else:
raise ValueError("This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps\n the target language to the right token id. Make sure to load the right generation config.")
else:
logger.warning('You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get\n a correct generation, otherwise the generation will probably make no sense.')
return super().generate(input_ids, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, decoder_input_ids=text_decoder_input_ids, **kwargs)
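# --- Hedged usage sketch (added, not part of the source file) ---
# Minimal T2TT call for the SeamlessM4Tv2ForTextToText class above; the AutoProcessor
# usage and the checkpoint name are assumptions for illustration.
from transformers import AutoProcessor, SeamlessM4Tv2ForTextToText

processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
t2tt_model = SeamlessM4Tv2ForTextToText.from_pretrained("facebook/seamless-m4t-v2-large")
inputs = processor(text="Hello, how are you?", src_lang="eng", return_tensors="pt")
# `tgt_lang` is mapped to a decoder start token through
# generation_config.text_decoder_lang_to_code_id, as enforced in the `generate` override above.
tokens = t2tt_model.generate(**inputs, tgt_lang="fra")
print(processor.decode(tokens[0], skip_special_tokens=True))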
| null | 11 | 2 | 22 | 2 | 14 | 6 | 3 | 0.37 | 2 | 10 | 5 | 0 | 10 | 4 | 11 | 17 | 260 | 33 | 166 | 59 | 126 | 61 | 75 | 31 | 63 | 15 | 2 | 3 | 31 |
5,171 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2GenerationOutput |
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Class defining the generated outputs from [`SeamlessM4Tv2Model`], [`SeamlessM4Tv2ForTextToText`],\n [`SeamlessM4Tv2ForTextToSpeech`], [`SeamlessM4Tv2ForSpeechToSpeech`] and [`SeamlessM4Tv2ForTextToSpeech`].\n ')
class SeamlessM4Tv2GenerationOutput(ModelOutput):
"""
waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
The final audio waveform predicted by the model.
waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
The length in samples of each element in the `waveform` batch.
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
early due to the `eos_token_id`.
unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
The generated translated unit sequences. This is the output of the text-to-units model. The second
dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
early due to the `t2u_eos_token_id`.
"""
waveform: Optional[torch.FloatTensor] = None
waveform_lengths: Optional[torch.IntTensor] = None
sequences: Optional[tuple[torch.FloatTensor]] = None
unit_sequences: Optional[tuple[torch.FloatTensor]] = None
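# --- Hedged sketch (added, not part of the source file) ---
# SeamlessM4Tv2GenerationOutput is a ModelOutput dataclass, so fields can be filled and read
# by name; the tensors below are dummies used purely for illustration.
import torch

dummy = SeamlessM4Tv2GenerationOutput(
    waveform=torch.zeros(1, 16000),
    waveform_lengths=torch.tensor([16000]),
)
assert dummy.waveform.shape == (1, 16000)
assert dummy.sequences is None  # only set when generate(..., return_intermediate_token_ids=True)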
|
@dataclass
@auto_docstring(custom_intro='\n Class defining the generated outputs from [`SeamlessM4Tv2Model`], [`SeamlessM4Tv2ForTextToText`],\n [`SeamlessM4Tv2ForTextToSpeech`], [`SeamlessM4Tv2ForSpeechToSpeech`] and [`SeamlessM4Tv2ForTextToSpeech`].\n ')
class SeamlessM4Tv2GenerationOutput(ModelOutput):
'''
waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
The final audio waveform predicted by the model.
waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
The length in samples of each element in the `waveform` batch.
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
early due to the `eos_token_id`.
unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
The generated translated unit sequences. This is the output of the text-to-units model. The second
dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
early due to the `t2u_eos_token_id`.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 2 | 5 | 5 | 4 | 17 | 5 | 5 | 4 | 0 | 1 | 0 | 0 |
5,172 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2HifiGan |
from torch import Tensor, nn
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
import torch
class SeamlessM4Tv2HifiGan(nn.Module):
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__()
model_in_dim = config.unit_embed_dim + config.lang_embed_dim + config.spkr_embed_dim
self.leaky_relu_slope = config.leaky_relu_slope
self.num_kernels = len(config.resblock_kernel_sizes)
self.num_upsamples = len(config.upsample_rates)
self.conv_pre = nn.Conv1d(model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3)
self.upsampler = nn.ModuleList()
for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
self.upsampler.append(nn.ConvTranspose1d(config.upsample_initial_channel // 2 ** i, config.upsample_initial_channel // 2 ** (i + 1), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2))
self.resblocks = nn.ModuleList()
for i in range(len(self.upsampler)):
channels = config.upsample_initial_channel // 2 ** (i + 1)
for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
"""
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
waveform.
Args:
spectrogram (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim`
is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
"""
hidden_states = self.conv_pre(input_embeds)
for i in range(self.num_upsamples):
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = self.upsampler[i](hidden_states)
res_state = self.resblocks[i * self.num_kernels](hidden_states)
for j in range(1, self.num_kernels):
res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
hidden_states = res_state / self.num_kernels
hidden_states = nn.functional.leaky_relu(hidden_states)
hidden_states = self.conv_post(hidden_states)
hidden_states = torch.tanh(hidden_states)
waveform = hidden_states.squeeze(1)
return waveform
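# --- Hedged shape sketch (added, not part of the source file) ---
# The module begins with nn.Conv1d, so this sketch assumes channel-first input of width
# unit_embed_dim + lang_embed_dim + spkr_embed_dim; default config values and the
# availability of HifiGanResidualBlock in the same module are assumptions.
import torch
from transformers import SeamlessM4Tv2Config

cfg = SeamlessM4Tv2Config()
vocoder = SeamlessM4Tv2HifiGan(cfg)
in_dim = cfg.unit_embed_dim + cfg.lang_embed_dim + cfg.spkr_embed_dim
frames = torch.randn(1, in_dim, 50)   # (batch, channels, num_unit_frames)
waveform = vocoder(frames)            # (batch, num_audio_samples)
# The time axis grows by roughly the product of cfg.upsample_rates.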
|
class SeamlessM4Tv2HifiGan(nn.Module):
def __init__(self, config: SeamlessM4Tv2Config):
pass
def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
'''
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
waveform.
Args:
spectrogram (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim`
is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
'''
pass
| 3 | 1 | 34 | 5 | 22 | 7 | 4 | 0.31 | 1 | 6 | 2 | 0 | 2 | 7 | 2 | 12 | 70 | 11 | 45 | 19 | 42 | 14 | 31 | 19 | 28 | 4 | 1 | 2 | 7 |
5,173 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2Model |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n The original SeamlessM4Tv2 Model transformer which can be used for every tasks available (S2ST, S2TT, T2TT, T2ST).\n ')
class SeamlessM4Tv2Model(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight', 'text_encoder.embed_tokens.weight', 'text_decoder.embed_tokens.weight']
def __init__(self, config, current_modality='text'):
"""
current_modality (`str`, *optional*, defaults to `"text"`):
Default modality. Used to initialize the model.
"""
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared)
self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
self.current_modality = current_modality
if current_modality == 'speech':
self.main_input_name = 'input_features'
self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
def set_modality(self, modality='text'):
if modality == 'text':
self.main_input_name = 'input_ids'
self.current_modality = 'text'
elif modality == 'speech':
self.main_input_name = 'input_features'
self.current_modality = 'speech'
else:
raise ValueError(f'`modality={modality}` is not a valid modality. It must be `text` or `speech`.')
def get_encoder(self):
if self.current_modality == 'text':
return self.text_encoder
else:
return self.speech_encoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.lm_head, self.shared)
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
def forward(self, input_ids: Optional[torch.LongTensor]=None, input_features: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
if input_ids is None and input_features is None and (inputs_embeds is None) and (encoder_outputs is None):
raise ValueError('`input_ids`,`input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not.')
elif input_features is not None:
if input_ids is not None:
logger.warning('`input_ids` is not `None` but `input_features` has been given. `input_features` will be used in priority through the `speech_encoder`. Make sure that `input_features` and `input_ids` are mutually exclusive.')
if inputs_embeds is not None:
logger.warning('`inputs_embeds` is not `None` but `input_features` has been given. `input_features` will be used in priority through `speech_encoder`. `inputs_embeds` will be ignored.')
logger.warning('This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText` depending on the input modality. If you want to generate speech, use the `generate` method.')
self.set_modality('speech')
encoder_outputs = self.speech_encoder(input_features=input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif input_ids is not None or inputs_embeds is not None:
logger.warning('This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText` depending on the input modality. If you want to generate speech, use the `generate` method.')
self.set_modality('text')
encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
encoder_attention_mask = attention_mask
if self.current_modality == 'speech' and attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_outputs[0].device)
encoder_attention_mask = _compute_new_attention_mask(hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths)
decoder_outputs = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@torch.no_grad()
def generate(self, input_ids: Optional[torch.Tensor]=None, input_features: Optional[torch.Tensor]=None, return_intermediate_token_ids: Optional[bool]=None, tgt_lang: Optional[str]=None, speaker_id: Optional[int]=0, generate_speech: Optional[bool]=True, **kwargs) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
"""
Generates translated token ids and/or translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively
perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
Note that if `generate_speech=False`, this parameter will be ignored and
the text tokens are returned.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
speaker_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
generate_speech (`bool`, *optional*, defaults to `True`):
If `False`, will only return the text tokens and won't generate speech.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4Tv2GenerationOutput, tuple[Tensor], ModelOutput]`:
- If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
- If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample.
- If `generate_speech=False`, it will return a `ModelOutput`.
"""
if input_ids is None and input_features is None and (kwargs.get('inputs_embeds') is None):
raise ValueError('`input_ids`,`input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not.')
if generate_speech and tgt_lang is None:
raise ValueError('You must specify a `tgt_lang` to generate translated speech.')
if tgt_lang is not None:
tgt_lang = tgt_lang.replace('__', '')
if generate_speech:
keys_to_check = ['text_decoder_lang_to_code_id', 't2u_lang_code_to_id', 'vocoder_lang_code_to_id']
else:
keys_to_check = ['text_decoder_lang_to_code_id']
for key in keys_to_check:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(f"This model generation config doesn't have a `{key}` key which maps the target language\n to the right token id. Make sure to load the right generation config.")
elif tgt_lang not in lang_code_to_id:
raise ValueError(f"`tgt_lang={tgt_lang}` is not supported by this model.\n Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports\n more languages for text translation than for speech synthesis.")
batch_size = len(input_features) if input_features is not None else len(input_ids) if input_ids is not None else len(kwargs.get('inputs_embeds'))
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text['output_hidden_states'] = True
kwargs_text['return_dict_in_generate'] = True
kwargs_text['output_scores'] = True
text_decoder_input_ids = kwargs_text.get('decoder_input_ids')
if tgt_lang is not None:
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text['decoder_input_ids'] = text_decoder_input_ids
if input_features is not None:
self.set_modality('speech')
if input_ids is not None:
logger.warning('`input_features` and `input_ids` are both non empty. `input_features` will be used in priority through the speech encoder. Make sure `input_features=None` if you want to use the text encoder.')
text_generation_output = super().generate(input_features=input_features, **kwargs_text)
else:
self.set_modality('text')
text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text)
sequences = text_generation_output.sequences
if not generate_speech:
return text_generation_output
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get('attention_mask', kwargs_text.get('attention_mask', None))
if self.current_modality == 'speech':
encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask).last_hidden_state
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(encoder_hidden_states.device)
attention_mask = _compute_new_attention_mask(hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths)
else:
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
if attention_mask is not None:
attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
t2u_input_embeds = self.text_decoder(input_ids=sequences[:, :-1], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech['attention_mask'] = t2u_model_attention_mask
t2u_input_ids = sequences[:, 2:-1]
t2u_input_ids = torch.masked_fill(t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id)
t2u_subwords = self._indices_to_subwords(t2u_input_ids)
t2u_char_count_per_id = self._count_character_length_in_subword(t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id)
pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
t2u_char_input_ids = self._get_char_input_ids(t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id)
t2u_output = self.t2u_model(inputs_embeds=t2u_input_embeds, char_input_ids=t2u_char_input_ids, char_count_per_id=t2u_char_count_per_id, **kwargs_speech)
t2u_logits = t2u_output[0]
padding_mask = t2u_output[1].bool()
temperature = kwargs_speech.get('temperature', None)
if (temperature is None or temperature == 1.0) or not kwargs_speech.get('do_sample', False):
unit_ids = t2u_logits.argmax(dim=-1)
else:
t2u_logits = t2u_logits / temperature
probs = nn.functional.softmax(t2u_logits, dim=-1)
probs = probs.reshape((-1, probs.shape[2]))
unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
output_unit_ids = unit_ids.detach().clone()
replace_mask = (unit_ids == self.config.t2u_eos_token_id) | ~padding_mask
unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
unit_ids = torch.where(unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
speaker_id = torch.tensor([[speaker_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4Tv2GenerationOutput(waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids)
return (waveform, waveform_lengths)
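# --- Hedged usage sketch (added, not part of the source file) ---
# Text-to-speech translation (T2ST) with the multitask model above; the AutoProcessor
# usage and the checkpoint name are assumptions for illustration.
from transformers import AutoProcessor, SeamlessM4Tv2Model

processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
m4t_model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
text_inputs = processor(text="Hello, how are you?", src_lang="eng", return_tensors="pt")

# Default path: returns (waveform, waveform_lengths), as in the code above.
waveform, waveform_lengths = m4t_model.generate(**text_inputs, tgt_lang="fra", speaker_id=0)

# Text-only path: generate_speech=False short-circuits after the text decoder.
text_out = m4t_model.generate(**text_inputs, tgt_lang="fra", generate_speech=False)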
| null | 12 | 3 | 41 | 6 | 28 | 7 | 5 | 0.28 | 1 | 15 | 8 | 0 | 10 | 9 | 11 | 17 | 478 | 72 | 319 | 93 | 278 | 88 | 165 | 64 | 153 | 20 | 2 | 3 | 54 |
5,174 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2PreTrainedModel |
import math
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_utils import PreTrainedModel
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring
class SeamlessM4Tv2PreTrainedModel(PreTrainedModel):
config: SeamlessM4Tv2Config
base_model_prefix = 'seamless_m4t_v2'
supports_gradient_checkpointing = True
_no_split_modules = ['SeamlessM4Tv2EncoderLayer', 'SeamlessM4Tv2DecoderLayer', 'SeamlessM4Tv2ConformerEncoderLayer', 'SeamlessM4Tv2TextToUnitDecoderLayer']
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, SeamlessM4Tv2ConformerSelfAttention):
if hasattr(module, 'pos_bias_u'):
nn.init.xavier_uniform_(module.pos_bias_u)
if hasattr(module, 'pos_bias_v'):
nn.init.xavier_uniform_(module.pos_bias_v)
elif isinstance(module, SeamlessM4Tv2ConformerFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, SeamlessM4Tv2TextToUnitDecoder):
module.pos_emb_alpha_char.data.fill_(1)
module.pos_emb_alpha.data.fill_(1)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
kernel_size, stride = (self.config.adaptor_kernel_size, self.config.adaptor_stride)
pad = kernel_size // 2
seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
seq_lens = (seq_lens + 2 * pad - kernel_size) / stride + 1
return seq_lens.floor()
def _indices_to_subwords(self, input_ids):
"""
Returns the corresponding text string for each input id.
"""
if not hasattr(self.generation_config, 'id_to_text'):
raise ValueError("This model generation config doesn't have a `id_to_text` key which maps\n token ids to subwords. Make sure to load the right generation config.")
batch_size, sequence_len = input_ids.shape
subwords_batch = []
for batch_id in range(batch_size):
subwords = []
for i in range(sequence_len):
subword = self.generation_config.id_to_text.get(str(input_ids[batch_id, i].item()))
subwords.append(str(subword))
subwords_batch.append(subwords)
return subwords_batch
def _count_character_length_in_subword(self, input_ids, subwords_batch, merge_space_with_prev_subword=False, pad_token_id=0, unk_token_id=1, space='▁'):
"""
Counts the number of characters per text string associated with the input token id.
Args:
input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
subwords_batch (`list[list[str]]` of shape `(batch_size, sequence_length)`):
Corresponding text string for each input id.
merge_space_with_prev_subword (`bool`, *optional*, defaults to `False`):
Indicates if the space character is merged with the previous subword. If `False`, it will be merged
with the next subword.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. If it is encountered when calculating the length of a subword
sample, the lengths of subsequent subwords will be set to 0.
unk_token_id (`int`, *optional*, defaults to 1):
The id of the _unknown_ text token. Associated to a subword of length 1.
space (`str`, *optional*, defaults to `"▁"`):
The space character.
"""
batch_size, _ = input_ids.shape
char_count_per_id = input_ids.new_zeros(input_ids.size())
subword_lens = input_ids.ne(pad_token_id).sum(1)
for batch_id in range(batch_size):
subword_indices = input_ids[batch_id, :subword_lens[batch_id]]
subwords = subwords_batch[batch_id][:subword_lens[batch_id]]
is_next_start_with_space = [len(subwords[i + 1]) > 1 and subwords[i + 1][0] == space if i < len(subwords) - 1 else False for i in range(len(subwords))]
is_punc = [len(subwords[i]) == 1 and (not subwords[i].isalpha()) and (not subwords[i].isnumeric()) and (subwords[i] != space) for i in range(len(subwords))]
for i, (subword_idx, subword) in enumerate(zip(subword_indices, subwords)):
if subword_idx == pad_token_id:
break
if subword_idx == unk_token_id:
char_len = 1
if merge_space_with_prev_subword and is_next_start_with_space[i]:
char_len += 1
else:
char_len = len(subword)
if merge_space_with_prev_subword:
if is_next_start_with_space[i]:
char_len += 1
if i > 0 and is_next_start_with_space[i - 1]:
char_len -= 1
elif is_punc[i] and is_next_start_with_space[i]:
char_len += 1
elif i > 0 and is_punc[i - 1] and is_next_start_with_space[i - 1]:
char_len -= 1
char_count_per_id[batch_id, i] = char_len
return char_count_per_id
def _get_char_input_ids(self, input_ids, subwords_batch, char_count_per_id, pad_token_id=0, unk_token_id=1):
"""
Returns the corresponding character input id for each character of `subwords_batch`.
Args:
input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
subwords_batch (`list[list[str]]` of shape `(batch_size, sequence_length)`):
Corresponding text string for each input id.
char_count_per_id (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Number of characters per input id.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. If it is encountered when calculating the length of a subword
sample, the lengths of subsequent subwords will be set to 0.
unk_token_id (`int`, *optional*, defaults to 1):
The id of the _unknown_ text token. Associated to a subword of length 1.
Returns:
`torch.Tensor`: Tensor of shape `(batch_size, char_sequence_length)` containing the id of each character.
"""
if not hasattr(self.generation_config, 'char_to_id'):
raise ValueError("This model generation config doesn't have a `char_to_id` key which maps\n characters to character ids. Make sure to load the right generation config.")
batch_size = input_ids.shape[0]
max_len = int(char_count_per_id.sum(1).max().item())
char_seqs = input_ids.new_zeros((batch_size, max_len)).fill_(pad_token_id)
subword_lens = input_ids.ne(pad_token_id).sum(1)
for batch_id in range(batch_size):
total = 0
subword_indices = input_ids[batch_id, :subword_lens[batch_id]]
subwords = subwords_batch[batch_id][:subword_lens[batch_id]]
for subword_idx, subword in zip(subword_indices, subwords):
if subword_idx == unk_token_id:
char_ids = [unk_token_id]
else:
char_ids = [self.generation_config.char_to_id.get(ch, unk_token_id) for ch in list(subword)]
char_seq_len = len(char_ids)
char_seqs[batch_id, total:total + char_seq_len] = torch.tensor(char_ids).to(char_seqs)
total += char_seq_len
return char_seqs
def _hard_upsample(self, hidden_states, durations):
"""
Repeats the time dimension of each sample in the batch based on the corresponding duration.
Args:
hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, *)`, *optional*):
The sequence to repeat, where `*` is any number of sequence-specific dimensions including none.
durations (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates how many times to repeat time segments.
"""
if hidden_states.size(0) == 1:
hidden_states = torch.repeat_interleave(hidden_states, durations.view(-1), dim=1)
else:
if hidden_states.shape[0] > 1 and self.training:
logger.warning_once('`self.training=True` and you use batching. You lose parallelism during the hifigan\n forward pass because the samples are interleaved.')
hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=0) for hidden_state, duration in zip(hidden_states, durations)]
hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True)
return hidden_states
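# --- Hedged illustration (added, not part of the source file) ---
# For a single-sample batch, the duration-based repetition in `_hard_upsample` above
# reduces to torch.repeat_interleave along the time axis, as this toy example shows.
import torch

hidden = torch.arange(3).view(1, 3, 1).float()   # (batch=1, time=3, dim=1)
durations = torch.tensor([[2, 1, 3]])            # how often to repeat each time step
upsampled = torch.repeat_interleave(hidden, durations.view(-1), dim=1)
print(upsampled.squeeze(-1))                     # tensor([[0., 0., 1., 2., 2., 2.]])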
|
@auto_docstring
class SeamlessM4Tv2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pass
def _indices_to_subwords(self, input_ids):
'''
Returns the corresponding text string for each input id.
'''
pass
def _count_character_length_in_subword(self, input_ids, subwords_batch, merge_space_with_prev_subword=False, pad_token_id=0, unk_token_id=1, space='▁'):
'''
Counts the number of characters per text string associated with the input token id.
Args:
input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
subwords_batch (`list[list[str]]` of shape `(batch_size, sequence_length)`):
Corresponding text string for each input id.
merge_space_with_prev_subword (`bool`, *optional*, defaults to `False`):
Indicates if the space character is merged with the previous subword. If `False`, it will be merged
with the next subword.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. If it is encountered when calculating the length of a subword
sample, the lengths of subsequent subwords will be set to 0.
unk_token_id (`int`, *optional*, defaults to 1):
The id of the _unknown_ text token. Associated to a subword of length 1.
space (`str`, *optional*, defaults to `"▁"`):
The space character.
'''
pass
def _get_char_input_ids(self, input_ids, subwords_batch, char_count_per_id, pad_token_id=0, unk_token_id=1):
'''
Returns the corresponding character input id for each character of `subwords_batch`.
Args:
input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
subwords_batch (`list[list[str]]` of shape `(batch_size, sequence_length)`):
Corresponding text string for each input id.
char_count_per_id (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Number of characters per input id.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the _padding_ text token. If it is encountered when calculating the length of a subword
sample, the lengths of subsequent subwords will be set to 0.
unk_token_id (`int`, *optional*, defaults to 1):
The id of the _unknown_ text token. Associated to a subword of length 1.
Returns:
`torch.Tensor`: Tensor of shape `(batch_size, char_sequence_length)` containing the id of each character.
'''
pass
def _hard_upsample(self, hidden_states, durations):
'''
Repeats the time dimension of each sample in the batch based on the corresponding duration.
Args:
hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, *)`, *optional*):
The sequence to repeat, where `*` is any number of sequence-specific dimensions including none.
durations (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates how many times to repeat time segments.
'''
pass
| 8 | 5 | 35 | 4 | 22 | 10 | 6 | 0.43 | 1 | 9 | 2 | 11 | 6 | 0 | 6 | 6 | 233 | 28 | 143 | 51 | 128 | 62 | 99 | 43 | 92 | 12 | 1 | 5 | 37 |
5,175 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2ScaledWordEmbedding |
from torch import Tensor, nn
import torch
from typing import Optional, Union
class SeamlessM4Tv2ScaledWordEmbedding(nn.Embedding):
"""
This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
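# --- Hedged sketch (added, not part of the source file) ---
# The override above only rescales the standard embedding lookup; the tiny sizes and the
# scale value here are arbitrary illustration values.
import torch
import torch.nn.functional as F

scaled = SeamlessM4Tv2ScaledWordEmbedding(num_embeddings=10, embedding_dim=4, padding_idx=0, embed_scale=2.0)
ids = torch.tensor([[1, 2, 3]])
assert torch.allclose(scaled(ids), F.embedding(ids, scaled.weight, padding_idx=0) * 2.0)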
|
class SeamlessM4Tv2ScaledWordEmbedding(nn.Embedding):
'''
This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
'''
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
pass
def forward(self, input_ids: torch.Tensor):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.5 | 1 | 4 | 0 | 0 | 2 | 1 | 2 | 2 | 11 | 2 | 6 | 4 | 3 | 3 | 6 | 4 | 3 | 1 | 1 | 0 | 2 |
5,176 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2SinusoidalPositionalEmbedding |
from torch import Tensor, nn
import math
import torch
from typing import Optional, Union
class SeamlessM4Tv2SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, 'weights'):
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer('weights', emb_weights, persistent=False)
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0):
if input_ids is not None:
bsz, seq_len = input_ids.size()
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, self.padding_idx)
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
|
class SeamlessM4Tv2SinusoidalPositionalEmbedding(nn.Module):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
pass
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
pass
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
'''
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
'''
pass
@torch.no_grad()
def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0):
pass
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
'''
pass
| 11 | 4 | 13 | 2 | 9 | 3 | 2 | 0.34 | 1 | 3 | 0 | 0 | 4 | 3 | 5 | 15 | 76 | 13 | 47 | 22 | 37 | 16 | 38 | 18 | 32 | 3 | 1 | 1 | 10 |
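As a worked example of `create_position_ids_from_input_ids` shown above (with `past_key_values_length=0`): non-padding tokens receive positions starting at `padding_idx + 1`, while padding tokens keep `padding_idx` itself.

```python
import torch

# Mirrors the mask/cumsum logic shown above with past_key_values_length = 0.
padding_idx = 1
input_ids = torch.tensor([[5, 6, 7, padding_idx, padding_idx]])
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
position_ids = incremental_indices.long() + padding_idx
print(position_ids)        # tensor([[2, 3, 4, 1, 1]])
```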
|
5,177
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2SpeechEncoder
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring(custom_intro='\n Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers.\n Each layer is a [`SeamlessM4Tv2ConformerEncoderLayer`].\n ')
class SeamlessM4Tv2SpeechEncoder(SeamlessM4Tv2PreTrainedModel):
main_input_name = 'input_features'
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__(config)
self.feature_projection = SeamlessM4Tv2ConformerFeatureProjection(config)
self.encoder = SeamlessM4Tv2ConformerEncoder(config)
self.intermediate_ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn='relu', dropout=0.0)
self.adapter = SeamlessM4Tv2ConformerAdapter(config) if config.add_adapter else None
self.inner_layer_norm = nn.LayerNorm(config.hidden_size)
self.post_init()
def forward(self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Wav2Vec2BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_features is None:
raise ValueError('Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4Tv2SpeechEncoder.forward`.\n Make sure one of them is not `None`.')
hidden_states = self.feature_projection(input_features)
encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = encoder_outputs[0]
expanded_hidden_states = self.intermediate_ffn(hidden_states)
hidden_states = hidden_states + 0.5 * expanded_hidden_states
if self.adapter is not None:
hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)
hidden_states = self.inner_layer_norm(hidden_states)
if not return_dict:
return (hidden_states,) + encoder_outputs[1:]
return Wav2Vec2BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring(custom_intro='\n Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers.\n Each layer is a [`SeamlessM4Tv2ConformerEncoderLayer`].\n ')
class SeamlessM4Tv2SpeechEncoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config):
pass
def forward(self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Wav2Vec2BaseModelOutput]:
pass
| 4 | 0 | 30 | 6 | 24 | 1 | 5 | 0.02 | 1 | 10 | 6 | 0 | 2 | 5 | 2 | 8 | 64 | 13 | 50 | 20 | 39 | 1 | 27 | 12 | 24 | 7 | 2 | 1 | 9 |
|
5,178
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2TextToUnitDecoder
|
import math
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring(custom_intro='\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].\n ')
class SeamlessM4Tv2TextToUnitDecoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens: Optional[nn.Embedding]=None):
"""
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
"""
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx)
self.embed_tokens.weight = embed_tokens.weight
else:
self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx)
self.embed_char = nn.Embedding(config.char_vocab_size, config.hidden_size)
self.embed_char_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(self.max_target_positions, config.hidden_size, padding_idx=self.padding_idx)
self.pos_emb_alpha_char = nn.Parameter(torch.ones(1))
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.duration_predictor = SeamlessM4Tv2VariancePredictor(config.variance_predictor_embed_dim, config.variance_predictor_hidden_dim, config.variance_predictor_kernel_size, config.variance_pred_dropout)
self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(self.max_target_positions, config.hidden_size, padding_idx=self.padding_idx)
layers = []
for _ in range(config.decoder_layers):
layers.append(SeamlessM4Tv2TextToUnitDecoderLayer(config, decoder_attention_heads=config.decoder_attention_heads, decoder_ffn_dim=config.decoder_ffn_dim))
self.layers = nn.ModuleList(layers)
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
self.post_init()
def forward(self, char_input_ids: Optional[torch.LongTensor]=None, char_count_per_id: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SeamlessM4Tv2TextToUnitDecoderOutput]:
"""
Args:
char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`):
Character indices. The correspondence between characters and indices can be found in `char_to_id`, a
dictionary in the generation configuration.
char_count_per_id (`torch.Tensor` of shape `(batch_size, encoder_sequence_length)`):
Number of characters per text input id.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
char_padding_mask = _compute_new_attention_mask(char_input_ids, char_count_per_id.sum(1))
char_hidden_states = self._hard_upsample(encoder_hidden_states, char_count_per_id)
char_positions = self.pos_emb_alpha_char * self.embed_char_positions(inputs_embeds=char_hidden_states)
char_hidden_states = self.embed_char(char_input_ids) * self.embed_scale + char_positions + char_hidden_states
log_dur_pred = self.duration_predictor(char_hidden_states, padding_mask=char_padding_mask)
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
dur_out = dur_out.masked_fill(~char_padding_mask.bool(), 0.0)
char_hidden_states = self._hard_upsample(char_hidden_states, dur_out)
positions = self.pos_emb_alpha * self.embed_positions(inputs_embeds=char_hidden_states)
hidden_states = char_hidden_states + positions
padding_mask = _compute_new_attention_mask(hidden_states, dur_out.sum(1))
attention_mask = _prepare_4d_attention_mask(padding_mask, hidden_states.dtype)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, padding_mask=padding_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, padding_mask] if v is not None))
return SeamlessM4Tv2TextToUnitDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, padding_mask=padding_mask)
|
@auto_docstring(custom_intro='\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].\n ')
class SeamlessM4Tv2TextToUnitDecoder(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens: Optional[nn.Embedding]=None):
'''
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
'''
pass
def forward(self, char_input_ids: Optional[torch.LongTensor]=None, char_count_per_id: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SeamlessM4Tv2TextToUnitDecoderOutput]:
'''
Args:
char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`):
Character indices. The correspondence between characters and indices can be found in `char_to_id`, a
dictionary in the generation configuration.
char_count_per_id (`torch.Tensor` of shape `(batch_size, encoder_sequence_length)`):
Number of characters per text input id.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 4 | 2 | 42 | 5 | 30 | 8 | 5 | 0.25 | 1 | 10 | 5 | 0 | 4 | 16 | 4 | 10 | 172 | 23 | 119 | 49 | 102 | 30 | 68 | 37 | 63 | 14 | 2 | 3 | 20 |
|
5,179
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2TextToUnitDecoderLayer
|
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
from ...activations import ACT2FN
class SeamlessM4Tv2TextToUnitDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None):
super().__init__()
decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
decoder_attention_heads = config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
self.dropout = config.dropout
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4Tv2Attention(embed_dim=self.embed_dim, num_heads=decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.conv1 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding='same')
self.activation_fn = ACT2FN[config.activation_function]
self.conv2 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding='same')
self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
self.conv_dropout = nn.Dropout(self.dropout)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
or 0 for *masked*
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.conv1(hidden_states.transpose(1, 2)).transpose(1, 2)
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.conv2(hidden_states.transpose(1, 2)).transpose(1, 2)
hidden_states = self.conv_dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.conv_layer_norm(hidden_states)
return (hidden_states, self_attn_weights)
|
class SeamlessM4Tv2TextToUnitDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
or 0 for *masked*
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3 | 1 | 40 | 6 | 25 | 9 | 4 | 0.33 | 1 | 5 | 2 | 0 | 2 | 9 | 2 | 12 | 81 | 13 | 51 | 21 | 42 | 17 | 34 | 15 | 31 | 4 | 1 | 1 | 7 |
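The convolutional branch in the layer above zeroes padded positions before each `Conv1d` and transposes because `Conv1d` operates on `(batch, channels, time)`. A minimal, hedged sketch of that mask-then-convolve pattern (shapes and values are made up):

```python
import torch
from torch import nn

# Zero out padded time steps so the temporal convolution only mixes real tokens,
# then convolve over the time axis via the (batch, channels, time) layout.
hidden = torch.randn(2, 5, 8)                       # (batch, time, channels)
padding_mask = torch.tensor([[1, 1, 1, 0, 0],
                             [1, 1, 1, 1, 1]])      # 1 = real token, 0 = padding
hidden = hidden.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
conv = nn.Conv1d(8, 8, kernel_size=7, padding="same")
out = conv(hidden.transpose(1, 2)).transpose(1, 2)  # back to (batch, time, channels)
print(out.shape)                                    # torch.Size([2, 5, 8])
```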
|
5,180
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2TextToUnitDecoderOutput
|
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs from [`SeamlessM4Tv2TextToUnitDecoder`].\n ')
class SeamlessM4Tv2TextToUnitDecoderOutput(ModelOutput):
"""
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
padding_mask: Optional[torch.Tensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs from [`SeamlessM4Tv2TextToUnitDecoder`].\n ')
class SeamlessM4Tv2TextToUnitDecoderOutput(ModelOutput):
'''
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 4 | 5 | 5 | 4 | 18 | 5 | 5 | 4 | 0 | 1 | 0 | 0 |
|
5,181
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2TextToUnitForConditionalGeneration
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from ...generation import GenerationMixin
from torch import Tensor, nn
import copy
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4Tv2TextToUnitModel`].\n ')
class SeamlessM4Tv2TextToUnitForConditionalGeneration(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = ['vocoder', 'speech_encoder', 'text_encoder', 'text_decoder']
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens_decoder: Optional[nn.Embedding]=None):
"""
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
"""
config = copy.deepcopy(config)
for param, val in config.to_dict().items():
if param.startswith('t2u_'):
config.__setattr__(param[4:], val)
super().__init__(config)
self.model = SeamlessM4Tv2TextToUnitModel(config, embed_tokens_decoder)
self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, char_input_ids: Optional[torch.LongTensor]=None, char_count_per_id: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
"""
char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`):
Character indices. The correspondence between characters and indices can be found in `char_to_id`, a
dictionary in the generation configuration.
char_count_per_id (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Number of characters per input id.
inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids, char_input_ids=char_input_ids, char_count_per_id=char_count_per_id, attention_mask=attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
lm_logits = self.lm_head(outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return SeamlessM4Tv2TextToUnitOutput(last_hidden_state=lm_logits, padding_mask=outputs.padding_mask, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loss=masked_lm_loss)
def _tie_weights(self) -> None:
if getattr(self.config, 'tie_word_embeddings', True):
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
|
@auto_docstring(custom_intro='\n Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4Tv2TextToUnitModel`].\n ')
class SeamlessM4Tv2TextToUnitForConditionalGeneration(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens_decoder: Optional[nn.Embedding]=None):
'''
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
'''
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, char_input_ids: Optional[torch.LongTensor]=None, char_count_per_id: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
'''
char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`):
Character indices. The correspondence between characters and indices can be found in `char_to_id`, a
dictionary in the generation configuration.
char_count_per_id (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Number of characters per input id.
inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
'''
pass
def _tie_weights(self) -> None:
pass
| 10 | 2 | 9 | 1 | 8 | 0 | 2 | 0.12 | 2 | 7 | 4 | 0 | 9 | 2 | 9 | 15 | 110 | 16 | 84 | 39 | 56 | 10 | 42 | 21 | 32 | 5 | 2 | 2 | 17 |
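The `__init__` in the record above deep-copies the config and lets every `t2u_*` field override its base counterpart before building the sub-model. A hedged, stand-alone sketch of that prefix-stripping pattern (field names and values are illustrative, not the real config defaults):

```python
import copy

# Each "t2u_"-prefixed entry overwrites the matching base key on a deep copy,
# so downstream code can read plain names like "hidden_size".
base = {"hidden_size": 1024, "vocab_size": 256000, "t2u_hidden_size": 768}
config = copy.deepcopy(base)
for param, val in list(config.items()):
    if param.startswith("t2u_"):
        config[param[4:]] = val
print(config["hidden_size"])   # 768
```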
|
5,182
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2TextToUnitModel
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Optional, Union
from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
@auto_docstring(custom_intro='\n Transformer bare text-to-unit encoder-decoder. The encoder is a [`SeamlessM4Tv2Encoder`] without embeddings and the decoder is a [`SeamlessM4Tv2TextToUnitDecoder`].\n ')
class SeamlessM4Tv2TextToUnitModel(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens_decoder: Optional[nn.Embedding]=None):
"""
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
"""
super().__init__(config)
self.encoder = SeamlessM4Tv2Encoder(config, is_t2u_encoder=True)
self.decoder = SeamlessM4Tv2TextToUnitDecoder(config, embed_tokens_decoder)
self.post_init()
def forward(self, input_ids: Optional[torch.LongTensor]=None, char_input_ids: Optional[torch.LongTensor]=None, char_count_per_id: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
decoder_outputs = self.decoder(char_input_ids=char_input_ids, char_count_per_id=char_count_per_id, encoder_hidden_states=encoder_outputs[0], output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return decoder_outputs + encoder_outputs
return SeamlessM4Tv2TextToUnitOutput(last_hidden_state=decoder_outputs.last_hidden_state, padding_mask=decoder_outputs.padding_mask, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
|
@auto_docstring(custom_intro='\n Transformer bare text-to-unit encoder-decoder. The encoder is a [`SeamlessM4Tv2Encoder`] without embeddings and the decoder is a [`SeamlessM4Tv2TextToUnitDecoder`].\n ')
class SeamlessM4Tv2TextToUnitModel(SeamlessM4Tv2PreTrainedModel):
def __init__(self, config: SeamlessM4Tv2Config, embed_tokens_decoder: Optional[nn.Embedding]=None):
'''
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
'''
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, char_input_ids: Optional[torch.LongTensor]=None, char_count_per_id: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
pass
| 4 | 1 | 35 | 3 | 30 | 2 | 5 | 0.07 | 1 | 9 | 6 | 0 | 2 | 2 | 2 | 8 | 72 | 7 | 61 | 21 | 43 | 4 | 17 | 6 | 14 | 9 | 2 | 1 | 10 |
|
5,183
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2TextToUnitOutput
|
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs from [`SeamlessM4Tv2TextToUnitForConditionalGeneration`] and\n [`SeamlessM4Tv2TextToUnitModel`].\n ')
class SeamlessM4Tv2TextToUnitOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
padding_mask: Optional[torch.Tensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
loss: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs from [`SeamlessM4Tv2TextToUnitForConditionalGeneration`] and\n [`SeamlessM4Tv2TextToUnitModel`].\n ')
class SeamlessM4Tv2TextToUnitOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
for *masked*
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.78 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 7 | 9 | 9 | 8 | 34 | 9 | 9 | 8 | 0 | 1 | 0 | 0 |
|
5,184
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2.SeamlessM4Tv2VariancePredictor
|
from torch import Tensor, nn
from typing import Optional, Union
class SeamlessM4Tv2VariancePredictor(nn.Module):
def __init__(self, embed_dim, hidden_dim, kernel_size, var_pred_dropout):
super().__init__()
self.conv1 = nn.Conv1d(embed_dim, hidden_dim, kernel_size=kernel_size, padding='same')
self.activation_function = nn.ReLU()
self.ln1 = nn.LayerNorm(hidden_dim)
self.dropout_module = nn.Dropout(p=var_pred_dropout)
self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel_size=kernel_size, padding='same')
self.ln2 = nn.LayerNorm(hidden_dim)
self.proj = nn.Linear(hidden_dim, 1)
def forward(self, hidden_states: Tensor, padding_mask: Optional[Tensor]=None) -> Tensor:
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.conv1(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln1(hidden_states))
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.conv2(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln2(hidden_states))
return self.proj(hidden_states).squeeze(dim=2)
|
class SeamlessM4Tv2VariancePredictor(nn.Module):
def __init__(self, embed_dim, hidden_dim, kernel_size, var_pred_dropout):
pass
def forward(self, hidden_states: Tensor, padding_mask: Optional[Tensor]=None) -> Tensor:
pass
| 3 | 0 | 17 | 1 | 16 | 1 | 2 | 0.03 | 1 | 2 | 0 | 0 | 2 | 7 | 2 | 12 | 35 | 2 | 32 | 10 | 29 | 1 | 22 | 10 | 19 | 3 | 1 | 1 | 4 |
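The predictor above serves as the text-to-unit decoder's duration predictor; its output is treated as a log-scale duration, which the decoder's forward (shown earlier) converts to integer frame counts with `expm1`, rounding, and a floor of one. A small worked example of that conversion:

```python
import torch

# expm1 inverts the log1p-style prediction; round to integers; clamp so every
# character gets at least one output step.
log_dur_pred = torch.tensor([[0.0, 0.7, 1.8, -0.3]])
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
print(dur_out)     # tensor([[1, 1, 5, 1]])
```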
|
5,185
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/configuration_segformer.py
|
transformers.models.segformer.configuration_segformer.SegformerConfig
|
from ...configuration_utils import PretrainedConfig
import warnings
class SegformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`SegformerModel`]. It is used to instantiate an
SegFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SegFormer
[nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability before the classification head.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 256):
The dimension of the all-MLP decode head.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import SegformerModel, SegformerConfig
>>> # Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> configuration = SegformerConfig()
>>> # Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> model = SegformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'segformer'
def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-06, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
super().__init__(**kwargs)
if 'reshape_last_stage' in kwargs and kwargs['reshape_last_stage'] is False:
warnings.warn('Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be removed, as the behaviour will default to that of reshape_last_stage = True.', FutureWarning)
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
self.semantic_loss_ignore_index = semantic_loss_ignore_index
|
class SegformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`SegformerModel`]. It is used to instantiate an
SegFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SegFormer
[nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability before the classification head.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 256):
The dimension of the all-MLP decode head.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import SegformerModel, SegformerConfig
>>> # Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> configuration = SegformerConfig()
>>> # Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration
>>> model = SegformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-06, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
pass
| 2 | 1 | 50 | 2 | 48 | 0 | 2 | 1.14 | 1 | 2 | 0 | 0 | 1 | 19 | 1 | 1 | 118 | 11 | 50 | 43 | 27 | 57 | 25 | 22 | 23 | 2 | 1 | 1 | 2 |
|
5,186
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/configuration_segformer.py
|
transformers.models.segformer.configuration_segformer.SegformerOnnxConfig
|
from collections.abc import Mapping
from collections import OrderedDict
from ...onnx import OnnxConfig
from packaging import version
class SegformerOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
@property
def default_onnx_opset(self) -> int:
return 12
|
class SegformerOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
@property
def default_onnx_opset(self) -> int:
pass
| 7 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 4 | 0 | 0 | 3 | 0 | 3 | 3 | 18 | 3 | 15 | 8 | 8 | 0 | 8 | 5 | 4 | 1 | 1 | 0 | 3 |
|
5,187
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/feature_extraction_segformer.py
|
transformers.models.segformer.feature_extraction_segformer.SegformerFeatureExtractor
|
from ...utils.import_utils import requires
import warnings
from .image_processing_segformer import SegformerImageProcessor
@requires(backends=('vision',))
class SegformerFeatureExtractor(SegformerImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use SegformerImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class SegformerFeatureExtractor(SegformerImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 31 | 8 | 0 | 8 | 2 | 6 | 0 | 4 | 2 | 2 | 1 | 4 | 0 | 1 |
|
5,188
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/image_processing_segformer.py
|
transformers.models.segformer.image_processing_segformer.SegformerImageProcessor
|
from typing import Optional, Union
from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, is_vision_available, logging
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils.import_utils import requires
from ...image_transforms import resize, to_channel_dimension_format
import numpy as np
@requires(backends=('vision',))
class SegformerImageProcessor(BaseImageProcessor):
"""
Constructs a Segformer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 512, "width": 512}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ['pixel_values']
@filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS)
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: bool=False, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 512, 'width': 512}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_reduce_labels = do_reduce_labels
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, rescale_factor: Optional[float]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image=image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False, input_data_format=input_data_format)
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
def __call__(self, images, segmentation_maps=None, **kwargs):
"""
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
"""
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after `resize` is applied.
resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
resample = resample if resample is not None else self.resample
size = size if size is not None else self.size
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
images = [self._preprocess_image(image=img, do_resize=do_resize, resample=resample, size=size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in images]
data = {'pixel_values': images}
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_mask(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
data['labels'] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
"""
Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`SegformerForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
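A minimal end-to-end sketch of how this processor is typically used together with a segmentation checkpoint (the checkpoint name is the one used in the model docstrings; the rest is illustrative):

```python
import requests
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = SegformerImageProcessor()
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

# resize -> rescale -> normalize, then batch into pixel_values
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Upsample the coarse logits to the original image size and take the per-pixel argmax.
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)  # (height, width) map of semantic class ids
```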
|
@requires(backends=('vision',))
class SegformerImageProcessor(BaseImageProcessor):
'''
Constructs a Segformer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 512, "width": 512}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
'''
@filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS)
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: bool=False, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
def reduce_label(self, label: ImageInput) -> np.ndarray:
pass
def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, rescale_factor: Optional[float]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single mask.'''
pass
def __call__(self, images, segmentation_maps=None, **kwargs):
'''
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after `resize` is applied.
resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
'''
Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`SegformerForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
'''
pass
| 13 | 7 | 37 | 2 | 25 | 10 | 4 | 0.54 | 1 | 10 | 2 | 1 | 9 | 9 | 10 | 30 | 423 | 34 | 252 | 101 | 167 | 137 | 105 | 29 | 94 | 13 | 3 | 2 | 41 |
5,189 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegFormerImageClassifierOutput |
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput, SemanticSegmenterOutput
from typing import Optional, Union
import torch
class SegFormerImageClassifierOutput(ImageClassifierOutput):
"""
Base class for outputs of image classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
class SegFormerImageClassifierOutput(ImageClassifierOutput):
'''
Base class for outputs of image classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 3 | 5 | 5 | 4 | 17 | 5 | 5 | 4 | 0 | 2 | 0 | 0 |
5,190 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerAttention |
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class SegformerAttention(nn.Module):
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
super().__init__()
self.self = SegformerEfficientSelfAttention(config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio)
self.output = SegformerSelfOutput(config, hidden_size=hidden_size)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, height, width, output_attentions=False):
self_outputs = self.self(hidden_states, height, width, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
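An illustrative (not canonical) head-pruning check, using a minimal stand-in config since the module only reads the two dropout probabilities from it:

```python
from types import SimpleNamespace

import torch
from transformers.models.segformer.modeling_segformer import SegformerAttention

config = SimpleNamespace(attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0)  # stand-in config
attn = SegformerAttention(config, hidden_size=64, num_attention_heads=4, sequence_reduction_ratio=1)

attn.prune_heads({0, 1})              # drop two of the four heads
print(attn.self.num_attention_heads)  # 2
print(attn.self.all_head_size)        # 32

# The pruned module still maps (batch, seq_len, hidden) -> (batch, seq_len, hidden).
out, = attn(torch.randn(1, 16, 64), height=4, width=4)
print(out.shape)
```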
|
class SegformerAttention(nn.Module):
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states, height, width, output_attentions=False):
pass
| 4 | 0 | 11 | 1 | 9 | 1 | 1 | 0.1 | 1 | 4 | 2 | 0 | 3 | 3 | 3 | 13 | 36 | 5 | 29 | 11 | 25 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
5,191 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerDWConv |
from torch import nn
class SegformerDWConv(nn.Module):
def __init__(self, dim=768):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, hidden_states, height, width):
batch_size, seq_len, num_channels = hidden_states.shape
hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
hidden_states = self.dwconv(hidden_states)
hidden_states = hidden_states.flatten(2).transpose(1, 2)
return hidden_states
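A small shape round-trip (illustrative only) showing how the depthwise convolution moves between the token layout and the spatial layout:

```python
import torch
from transformers.models.segformer.modeling_segformer import SegformerDWConv

dwconv = SegformerDWConv(dim=64)
height, width = 16, 16
tokens = torch.randn(2, height * width, 64)  # (batch, seq_len, channels)

# Internally: tokens -> (batch, channels, height, width) -> depthwise 3x3 conv -> tokens
out = dwconv(tokens, height, width)
print(out.shape)  # torch.Size([2, 256, 64]), same token layout as the input
```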
|
class SegformerDWConv(nn.Module):
def __init__(self, dim=768):
pass
def forward(self, hidden_states, height, width):
pass
| 3 | 0 | 5 | 1 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 12 | 12 | 2 | 10 | 5 | 7 | 0 | 10 | 5 | 7 | 1 | 1 | 0 | 2 |
5,192 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerDecodeHead |
import torch
import math
from torch import nn
class SegformerDecodeHead(SegformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
mlps = []
for i in range(config.num_encoder_blocks):
mlp = SegformerMLP(config, input_dim=config.hidden_sizes[i])
mlps.append(mlp)
self.linear_c = nn.ModuleList(mlps)
self.linear_fuse = nn.Conv2d(in_channels=config.decoder_hidden_size * config.num_encoder_blocks, out_channels=config.decoder_hidden_size, kernel_size=1, bias=False)
self.batch_norm = nn.BatchNorm2d(config.decoder_hidden_size)
self.activation = nn.ReLU()
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Conv2d(config.decoder_hidden_size, config.num_labels, kernel_size=1)
self.config = config
def forward(self, encoder_hidden_states: torch.FloatTensor) -> torch.Tensor:
batch_size = encoder_hidden_states[-1].shape[0]
all_hidden_states = ()
for encoder_hidden_state, mlp in zip(encoder_hidden_states, self.linear_c):
if self.config.reshape_last_stage is False and encoder_hidden_state.ndim == 3:
height = width = int(math.sqrt(encoder_hidden_state.shape[-1]))
encoder_hidden_state = encoder_hidden_state.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
height, width = (encoder_hidden_state.shape[2], encoder_hidden_state.shape[3])
encoder_hidden_state = mlp(encoder_hidden_state)
encoder_hidden_state = encoder_hidden_state.permute(0, 2, 1)
encoder_hidden_state = encoder_hidden_state.reshape(batch_size, -1, height, width)
encoder_hidden_state = nn.functional.interpolate(encoder_hidden_state, size=encoder_hidden_states[0].size()[2:], mode='bilinear', align_corners=False)
all_hidden_states += (encoder_hidden_state,)
hidden_states = self.linear_fuse(torch.cat(all_hidden_states[::-1], dim=1))
hidden_states = self.batch_norm(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
return logits
|
class SegformerDecodeHead(SegformerPreTrainedModel):
def __init__(self, config):
pass
def forward(self, encoder_hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 0 | 27 | 4 | 21 | 3 | 3 | 0.12 | 1 | 6 | 1 | 0 | 2 | 7 | 2 | 3 | 56 | 9 | 42 | 19 | 39 | 5 | 33 | 19 | 30 | 3 | 2 | 2 | 5 |
5,193 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerDropPath |
from typing import Optional, Union
from torch import nn
import torch
class SegformerDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
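The `drop_path` helper called above is defined elsewhere in the same module; a typical stochastic-depth implementation with that signature looks roughly like this (a sketch, not a verbatim copy):

```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity at inference time or when nothing is dropped.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dimensions.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    # Scale kept samples by 1 / keep_prob so the expected value is unchanged.
    return input.div(keep_prob) * random_tensor
```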
|
class SegformerDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.13 | 1 | 4 | 0 | 0 | 3 | 1 | 3 | 13 | 12 | 3 | 8 | 5 | 4 | 1 | 8 | 5 | 4 | 1 | 1 | 0 | 3 |
5,194 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention |
from torch import nn
import math
import torch
class SegformerEfficientSelfAttention(nn.Module):
"""SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
paper](https://huggingface.co/papers/2102.12122)."""
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
super().__init__()
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(f'The hidden size ({self.hidden_size}) is not a multiple of the number of attention heads ({self.num_attention_heads})')
self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(self.hidden_size, self.all_head_size)
self.key = nn.Linear(self.hidden_size, self.all_head_size)
self.value = nn.Linear(self.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.sr_ratio = sequence_reduction_ratio
if sequence_reduction_ratio > 1:
self.sr = nn.Conv2d(hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio)
self.layer_norm = nn.LayerNorm(hidden_size)
def forward(self, hidden_states, height, width, output_attentions=False):
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if self.sr_ratio > 1:
batch_size, seq_len, num_channels = hidden_states.shape
hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
hidden_states = self.sr(hidden_states)
hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
hidden_states = self.layer_norm(hidden_states)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
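An illustrative instantiation with a minimal stand-in config (only `attention_probs_dropout_prob` is read from it), showing that the sequence reduction shrinks the key/value length while the output keeps the query length:

```python
from types import SimpleNamespace

import torch
from transformers.models.segformer.modeling_segformer import SegformerEfficientSelfAttention

config = SimpleNamespace(attention_probs_dropout_prob=0.0)  # stand-in config
attn = SegformerEfficientSelfAttention(config, hidden_size=64, num_attention_heads=4, sequence_reduction_ratio=2)

height, width = 8, 8
hidden_states = torch.randn(1, height * width, 64)

# Keys/values are computed on a reduced 4x4 grid (sr_ratio=2); queries stay on the 8x8 grid.
context, = attn(hidden_states, height, width)
print(context.shape)  # torch.Size([1, 64, 64])
```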
|
class SegformerEfficientSelfAttention(nn.Module):
'''SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
paper](https://huggingface.co/papers/2102.12122).'''
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
pass
def forward(self, hidden_states, height, width, output_attentions=False):
pass
| 3 | 1 | 24 | 5 | 17 | 2 | 2 | 0.17 | 1 | 3 | 0 | 0 | 3 | 11 | 3 | 13 | 79 | 18 | 52 | 31 | 42 | 9 | 41 | 25 | 37 | 3 | 1 | 1 | 7 |
5,195 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerEncoder |
import torch
from typing import Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput, SemanticSegmenterOutput
class SegformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
drop_path_decays = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
embeddings = []
for i in range(config.num_encoder_blocks):
embeddings.append(SegformerOverlapPatchEmbeddings(patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i]))
self.patch_embeddings = nn.ModuleList(embeddings)
blocks = []
cur = 0
for i in range(config.num_encoder_blocks):
layers = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(SegformerLayer(config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i]))
blocks.append(nn.ModuleList(layers))
self.block = nn.ModuleList(blocks)
self.layer_norm = nn.ModuleList([nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)])
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
batch_size = pixel_values.shape[0]
hidden_states = pixel_values
for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
embedding_layer, block_layer, norm_layer = x
hidden_states, height, width = embedding_layer(hidden_states)
for i, blk in enumerate(block_layer):
layer_outputs = blk(hidden_states, height, width, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = norm_layer(hidden_states)
if idx != len(self.patch_embeddings) - 1 or (idx == len(self.patch_embeddings) - 1 and self.config.reshape_last_stage):
hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
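A quick way to see the hierarchical output, assuming the default `SegformerConfig` (four stages with hidden sizes 32/64/160/256):

```python
import torch
from transformers import SegformerConfig
from transformers.models.segformer.modeling_segformer import SegformerEncoder

config = SegformerConfig()
encoder = SegformerEncoder(config)
encoder.eval()  # disable stochastic depth

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    out = encoder(pixel_values, output_hidden_states=True)

# One feature map per stage, at strides 4 / 8 / 16 / 32 relative to the input.
for feature_map in out.hidden_states:
    print(feature_map.shape)
```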
|
class SegformerEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]:
pass
| 3 | 0 | 44 | 4 | 35 | 5 | 8 | 0.13 | 1 | 9 | 3 | 0 | 2 | 4 | 2 | 12 | 89 | 9 | 71 | 29 | 62 | 9 | 41 | 23 | 38 | 9 | 1 | 3 | 15 |
5,196 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerForImageClassification |
from torch import nn
from ...utils import auto_docstring, logging
from typing import Optional, Union
import torch
@auto_docstring(custom_intro='\n SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden\n states) e.g. for ImageNet.\n ')
class SegformerForImageClassification(SegformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.segformer = SegformerModel(config)
self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SegFormerImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.segformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
batch_size = sequence_output.shape[0]
if self.config.reshape_last_stage:
sequence_output = sequence_output.permute(0, 2, 3, 1)
sequence_output = sequence_output.reshape(batch_size, -1, self.config.hidden_sizes[-1])
sequence_output = sequence_output.mean(dim=1)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return SegFormerImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
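A usage sketch; `nvidia/mit-b0` is the ImageNet-1k pre-trained SegFormer encoder commonly paired with this classification head (assumed to be available on the Hub):

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, SegformerForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("nvidia/mit-b0")
model = SegformerForImageClassification.from_pretrained("nvidia/mit-b0")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Highest-scoring ImageNet class for the input image.
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```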
|
@auto_docstring(custom_intro='\n SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden\n states) e.g. for ImageNet.\n ')
class SegformerForImageClassification(SegformerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SegFormerImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 40 | 6 | 29 | 6 | 7 | 0.17 | 1 | 5 | 2 | 0 | 2 | 3 | 2 | 3 | 89 | 12 | 66 | 21 | 49 | 11 | 37 | 13 | 34 | 13 | 2 | 3 | 14 |
5,197 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerForSemanticSegmentation |
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput, SemanticSegmenterOutput
from torch import nn
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss
from ...utils import auto_docstring, logging
from typing import Optional, Union
@auto_docstring(custom_intro='\n SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes.\n ')
class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.segformer = SegformerModel(config)
self.decode_head = SegformerDecodeHead(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, SegformerForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> image_processor = AutoImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits # shape (batch_size, num_labels, height/4, width/4)
>>> list(logits.shape)
[1, 150, 128, 128]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
if labels is not None and self.config.num_labels < 1:
            raise ValueError(f'Number of labels should be >= 1: {self.config.num_labels}')
outputs = self.segformer(pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
logits = self.decode_head(encoder_hidden_states)
loss = None
if labels is not None:
upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
if self.config.num_labels > 1:
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
loss = loss_fct(upsampled_logits, labels)
elif self.config.num_labels == 1:
valid_mask = ((labels >= 0) & (labels != self.config.semantic_loss_ignore_index)).float()
loss_fct = BCEWithLogitsLoss(reduction='none')
loss = loss_fct(upsampled_logits.squeeze(1), labels.float())
loss = (loss * valid_mask).mean()
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
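Continuing the docstring example above, a sketch of how ground-truth maps drive the loss during fine-tuning (the label tensor here is random and purely hypothetical):

```python
import torch

# Hypothetical ground-truth map; any (batch, height, width) works, since the
# logits are bilinearly upsampled to the label resolution before the loss.
labels = torch.randint(0, model.config.num_labels, (1, 512, 512))

outputs = model(**inputs, labels=labels)
print(outputs.loss)          # cross-entropy with ignore_index=255 by default
print(outputs.logits.shape)  # unchanged: (batch_size, num_labels, height/4, width/4)
```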
|
@auto_docstring(custom_intro='\n SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes.\n ')
class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, SegformerForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> image_processor = AutoImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits # shape (batch_size, num_labels, height/4, width/4)
>>> list(logits.shape)
[1, 150, 128, 128]
```'''
pass
| 5 | 1 | 44 | 7 | 26 | 12 | 7 | 0.42 | 1 | 6 | 3 | 0 | 2 | 2 | 2 | 3 | 92 | 15 | 55 | 21 | 43 | 23 | 30 | 13 | 27 | 12 | 2 | 2 | 13 |
5,198 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerLayer |
from torch import nn
class SegformerLayer(nn.Module):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
super().__init__()
self.layer_norm_1 = nn.LayerNorm(hidden_size)
self.attention = SegformerAttention(config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio)
self.drop_path = SegformerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.layer_norm_2 = nn.LayerNorm(hidden_size)
mlp_hidden_size = int(hidden_size * mlp_ratio)
self.mlp = SegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
def forward(self, hidden_states, height, width, output_attentions=False):
self_attention_outputs = self.attention(self.layer_norm_1(hidden_states), height, width, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
attention_output = self.drop_path(attention_output)
hidden_states = attention_output + hidden_states
mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
mlp_output = self.drop_path(mlp_output)
layer_output = mlp_output + hidden_states
outputs = (layer_output,) + outputs
return outputs
|
class SegformerLayer(nn.Module):
'''This corresponds to the Block class in the original implementation.'''
def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
pass
def forward(self, hidden_states, height, width, output_attentions=False):
pass
| 3 | 1 | 19 | 3 | 15 | 2 | 2 | 0.17 | 1 | 5 | 3 | 0 | 2 | 5 | 2 | 12 | 41 | 8 | 30 | 14 | 27 | 5 | 20 | 14 | 17 | 2 | 1 | 0 | 3 |
5,199 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/segformer/modeling_segformer.py | transformers.models.segformer.modeling_segformer.SegformerMLP |
import torch
from .configuration_segformer import SegformerConfig
from torch import nn
class SegformerMLP(nn.Module):
"""
Linear Embedding.
"""
def __init__(self, config: SegformerConfig, input_dim):
super().__init__()
self.proj = nn.Linear(input_dim, config.decoder_hidden_size)
def forward(self, hidden_states: torch.Tensor):
hidden_states = hidden_states.flatten(2).transpose(1, 2)
hidden_states = self.proj(hidden_states)
return hidden_states
|
class SegformerMLP(nn.Module):
'''
Linear Embedding.
'''
def __init__(self, config: SegformerConfig, input_dim):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 1 | 0.38 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 12 | 13 | 2 | 8 | 4 | 5 | 3 | 8 | 4 | 5 | 1 | 1 | 0 | 2 |