Dataset columns, their dtypes, and the value range reported for each column (for string columns, the range is over string length):

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
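As a quick orientation, here is a minimal sketch of how a dataset with this layout could be loaded and inspected with the `datasets` library. The Hub identifier `org/python-class-metrics` and the `train` split are placeholders; the excerpt does not name the actual repository.

```python
# Hypothetical sketch only: the dataset path and split name are placeholders,
# since this excerpt does not give the real Hub identifier.
from datasets import load_dataset

ds = load_dataset("org/python-class-metrics", split="train")  # placeholder path

record = ds[0]
print(record["repository_name"], record["class_name"])
print(record["human_written_code"][:200])  # first 200 characters of the source string
print({k: record[k] for k in ("CountLineCode", "CountLineComment", "MaxCyclomatic")})
```

The records below follow the column order of the table above.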
id: 5,700
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.RelativePositionBiasVertical
from torch import Tensor, nn from typing import Any, Optional, Union class RelativePositionBiasVertical(RelativePositionBiasBase): def __init__(self, scaling_factor=100, max_distance=100, **kwargs): """ Represents in the bucket embeddings vertical distance between two tokens. Parameters are the same as in base class """ super().__init__(scaling_factor=scaling_factor, max_distance=max_distance, **kwargs) def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor: if not self.scaling_factor > 1.0: raise ValueError('Need to scale the values of bboxes, as there are in small (0,1) range') if bbox is None: raise ValueError('Bbox is required for vertical relative position bias') vertical_position: Tensor = bbox[:, :, [1, 3]].mean(dim=-1) return self.get_relative_position(vertical_position)
class RelativePositionBiasVertical(RelativePositionBiasBase): def __init__(self, scaling_factor=100, max_distance=100, **kwargs): ''' Represents in the bucket embeddings vertical distance between two tokens. Parameters are the same as in base class ''' pass def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor: pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 8, AvgCountLineBlank: 1, AvgCountLineCode: 5, AvgCountLineComment: 3, AvgCyclomatic: 2, CommentToCodeRatio: 0.5
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 0, CountDeclMethod: 2, CountDeclMethodAll: 37
CountLine: 17, CountLineBlank: 2, CountLineCode: 10, CountLineCodeDecl: 4, CountLineCodeExe: 7, CountLineComment: 5
CountStmt: 10, CountStmtDecl: 4, CountStmtExe: 7
MaxCyclomatic: 3, MaxInheritanceTree: 5, MaxNesting: 1, SumCyclomatic: 4
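For this record, CommentToCodeRatio (0.5) equals CountLineComment / CountLineCode (5 / 10). That relationship is not documented in the excerpt, so the sketch below treats it as an assumption to be checked rather than a guaranteed invariant of the dataset.

```python
# Assumption (not stated in the excerpt): CommentToCodeRatio == CountLineComment / CountLineCode.
# Verified here only against the values of the record above.
def ratio_consistent(record: dict, tol: float = 0.01) -> bool:
    code_lines = record["CountLineCode"]
    if code_lines == 0:
        return record["CommentToCodeRatio"] == 0
    return abs(record["CountLineComment"] / code_lines - record["CommentToCodeRatio"]) <= tol

example = {"CountLineCode": 10, "CountLineComment": 5, "CommentToCodeRatio": 0.5}
assert ratio_consistent(example)
```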
id: 5,701
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopAttention
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache import math from ...utils.deprecation import deprecate_kwarg from transformers import UdopConfig from torch import Tensor, nn from typing import Any, Optional, Union class UdopAttention(nn.Module): def __init__(self, config: UdopConfig, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) return values @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" batch_size, seq_length = hidden_states.shape[:2] is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) is_updated = False if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, :key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
class UdopAttention(nn.Module): def __init__(self, config: UdopConfig, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def prune_heads(self, heads): pass @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, query_length, key_length, device=None, cache_position=None): '''Compute binned relative position bias''' pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): ''' Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). ''' pass
total_program_units: 8, total_doc_str: 3
AvgCountLine: 44, AvgCountLineBlank: 5, AvgCountLineCode: 32, AvgCountLineComment: 8, AvgCyclomatic: 5, CommentToCodeRatio: 0.26
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 17, CountDeclMethod: 5, CountDeclMethodAll: 15
CountLine: 226, CountLineBlank: 28, CountLineCode: 160, CountLineCodeDecl: 67, CountLineCodeExe: 136, CountLineComment: 42
CountStmt: 113, CountStmtDecl: 49, CountStmtExe: 107
MaxCyclomatic: 16, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 26
id: 5,702
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopBlock
from ...utils.deprecation import deprecate_kwarg from torch import Tensor, nn from ...modeling_layers import GradientCheckpointingLayer from typing import Any, Optional, Union import torch class UdopBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(UdopLayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)) if self.is_decoder: self.layer.append(UdopLayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(UdopLayerFF(config)) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions) hidden_states = cross_attention_outputs[0] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) attention_outputs = attention_outputs + cross_attention_outputs[1:] hidden_states = self.layer[-1](hidden_states) if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return outputs + attention_outputs
class UdopBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 49, AvgCountLineBlank: 5, AvgCountLineCode: 42, AvgCountLineComment: 4, AvgCyclomatic: 4, CommentToCodeRatio: 0.08
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 100, CountLineBlank: 11, CountLineCode: 84, CountLineCodeDecl: 26, CountLineCodeExe: 66, CountLineComment: 7
CountStmt: 33, CountStmtDecl: 11, CountStmtExe: 30
MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 8
id: 5,703
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopCellEmbeddings
from torch import Tensor, nn import torch class UdopCellEmbeddings(nn.Module): def __init__(self, max_2d_position_embeddings=501, hidden_size=1024): super().__init__() self.max_2d_position_embeddings = max_2d_position_embeddings self.x_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size) self.y_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size) def forward(self, bbox): bbox = torch.clip(bbox, 0.0, 1.0) bbox = (bbox * (self.max_2d_position_embeddings - 1)).long() left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) embeddings = left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings return embeddings
class UdopCellEmbeddings(nn.Module): def __init__(self, max_2d_position_embeddings=501, hidden_size=1024): pass def forward(self, bbox): pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 11, AvgCountLineBlank: 2, AvgCountLineCode: 10, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 24, CountLineBlank: 4, CountLineCode: 20, CountLineCodeDecl: 11, CountLineCodeExe: 17, CountLineComment: 0
CountStmt: 15, CountStmtDecl: 11, CountStmtExe: 12
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 5,704
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopDenseActDense
from ...activations import ACT2FN import torch from torch import Tensor, nn from transformers import UdopConfig class UdopDenseActDense(nn.Module): def __init__(self, config: UdopConfig): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class UdopDenseActDense(nn.Module): def __init__(self, config: UdopConfig): pass def forward(self, hidden_states): pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 9, AvgCountLineBlank: 0, AvgCountLineCode: 9, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 20, CountLineBlank: 1, CountLineCode: 19, CountLineCodeDecl: 7, CountLineCodeExe: 16, CountLineComment: 0
CountStmt: 15, CountStmtDecl: 7, CountStmtExe: 12
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 5,705
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopDenseGatedActDense
from ...activations import ACT2FN import torch from torch import Tensor, nn from transformers import UdopConfig class UdopDenseGatedActDense(nn.Module): def __init__(self, config: UdopConfig): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class UdopDenseGatedActDense(nn.Module): def __init__(self, config: UdopConfig): pass def forward(self, hidden_states): pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 13, AvgCountLineBlank: 1, AvgCountLineCode: 10, AvgCountLineComment: 2, AvgCyclomatic: 2, CommentToCodeRatio: 0.14
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 27, CountLineBlank: 3, CountLineCode: 21, CountLineCodeDecl: 10, CountLineCodeExe: 18, CountLineComment: 3
CountStmt: 17, CountStmtDecl: 10, CountStmtExe: 14
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 5,706
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopEncoderModel
from typing import Any, Optional, Union from torch import Tensor, nn from copy import deepcopy from transformers import UdopConfig import torch from ...utils import ModelOutput, auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling @auto_docstring class UdopEncoderModel(UdopPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'encoder.embed_patches.proj.weight', 'encoder.embed_patches.proj.bias', 'encoder.relative_bias.biases.0.relative_attention_bias.weight'] def __init__(self, config: UdopConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) self.patch_embed = UdopPatchEmbeddings(config) encoder_config = deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = UdopStack(encoder_config, self.shared, self.patch_embed) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None, attention_mask: Optional[Tensor]=None, pixel_values: Optional[Tensor]=None, visual_bbox: Optional[dict[str, Any]]=None, head_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithAttentionMask]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. 
Example: ```python >>> from transformers import AutoProcessor, UdopEncoderModel >>> from huggingface_hub import hf_hub_download >>> from datasets import load_dataset >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = UdopEncoderModel.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(input_ids=input_ids, bbox=bbox, visual_bbox=visual_bbox, pixel_values=pixel_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return encoder_outputs
@auto_docstring class UdopEncoderModel(UdopPreTrainedModel): def __init__(self, config: UdopConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None, attention_mask: Optional[Tensor]=None, pixel_values: Optional[Tensor]=None, visual_bbox: Optional[dict[str, Any]]=None, head_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithAttentionMask]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. Example: ```python >>> from transformers import AutoProcessor, UdopEncoderModel >>> from huggingface_hub import hf_hub_download >>> from datasets import load_dataset >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = UdopEncoderModel.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
total_program_units: 9, total_doc_str: 2
AvgCountLine: 15, AvgCountLineBlank: 2, AvgCountLineCode: 9, AvgCountLineComment: 5, AvgCyclomatic: 2, CommentToCodeRatio: 0.48
CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 3, CountDeclMethod: 6, CountDeclMethodAll: 8
CountLine: 105, CountLineBlank: 16, CountLineCode: 60, CountLineCodeDecl: 27, CountLineCodeExe: 39, CountLineComment: 29
CountStmt: 28, CountStmtDecl: 14, CountStmtExe: 21
MaxCyclomatic: 4, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 10
id: 5,707
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopForConditionalGeneration
from ...generation import GenerationMixin from copy import deepcopy from torch import Tensor, nn from torch.nn import CrossEntropyLoss import torch from transformers.modeling_outputs import Seq2SeqLMOutput, Seq2SeqModelOutput from ...utils import ModelOutput, auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Any, Optional, Union @auto_docstring(custom_intro='\n The UDOP encoder-decoder Transformer with a language modeling head on top, enabling to generate text given document\n images and an optional prompt.\n\n This class is based on [`T5ForConditionalGeneration`], extended to deal with images and layout (2D) data.\n ') class UdopForConditionalGeneration(UdopPreTrainedModel, GenerationMixin): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'encoder.embed_patches.proj.weight', 'encoder.embed_patches.proj.bias', 'encoder.relative_bias.biases.0.relative_attention_bias.weight', 'decoder.relative_bias.biases.0.relative_attention_bias.weight', 'lm_head.weight'] def __init__(self, config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) self.patch_embed = UdopPatchEmbeddings(config) encoder_config = deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = UdopStack(encoder_config, self.shared, self.patch_embed) decoder_config = deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UdopStack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None, pixel_values: Optional[Tensor]=None, visual_bbox: Optional[dict[str, Any]]=None, decoder_input_ids: Optional[Tensor]=None, decoder_attention_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, encoder_outputs: Optional[Tensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[Tensor]=None, decoder_inputs_embeds: Optional[Tensor]=None, decoder_head_mask: Optional[Tensor]=None, cross_attn_head_mask: Optional[Tensor]=None, use_cache=True, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[Tensor]=None, cache_position: Optional[torch.LongTensor]=None) -> tuple[Tensor, ...]: """ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. 
visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`. Examples: ```python >>> from transformers import AutoProcessor, UdopForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> # one can use the various task prefixes (prompts) used during pre-training >>> # e.g. the task prefix for DocVQA is "Question answering. " >>> question = "Question answering. What is the date on the form?" 
>>> encoding = processor(image, question, text_pair=words, boxes=boxes, return_tensors="pt") >>> # autoregressive generation >>> predicted_ids = model.generate(**encoding) >>> print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]) 9/30/92 ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if decoder_input_ids is None and labels is not None: decoder_input_ids = self._shift_right(labels) if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, bbox=bbox, visual_bbox=visual_bbox, pixel_values=pixel_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = encoder_outputs[0] encoder_attention_mask = encoder_outputs.attention_mask if return_dict else encoder_outputs[1] decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * self.config.d_model ** (-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[2:] + (encoder_outputs[0],) + encoder_outputs[2:] return (loss,) + output if loss is not None else output return Seq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring(custom_intro='\n The UDOP encoder-decoder Transformer with a language modeling head on top, enabling to generate text given document\n images and an optional prompt.\n\n This class is based on [`T5ForConditionalGeneration`], extended to deal with images and layout (2D) data.\n ') class UdopForConditionalGeneration(UdopPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None, pixel_values: Optional[Tensor]=None, visual_bbox: Optional[dict[str, Any]]=None, decoder_input_ids: Optional[Tensor]=None, decoder_attention_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, encoder_outputs: Optional[Tensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[Tensor]=None, decoder_inputs_embeds: Optional[Tensor]=None, decoder_head_mask: Optional[Tensor]=None, cross_attn_head_mask: Optional[Tensor]=None, use_cache=True, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[Tensor]=None, cache_position: Optional[torch.LongTensor]=None) -> tuple[Tensor, ...]: ''' bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`. Examples: ```python >>> from transformers import AutoProcessor, UdopForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> # one can use the various task prefixes (prompts) used during pre-training >>> # e.g. the task prefix for DocVQA is "Question answering. " >>> question = "Question answering. What is the date on the form?" >>> encoding = processor(image, question, text_pair=words, boxes=boxes, return_tensors="pt") >>> # autoregressive generation >>> predicted_ids = model.generate(**encoding) >>> print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]) 9/30/92 ```''' pass
total_program_units: 8, total_doc_str: 1
AvgCountLine: 22, AvgCountLineBlank: 3, AvgCountLineCode: 14, AvgCountLineComment: 5, AvgCyclomatic: 3, CommentToCodeRatio: 0.31
CountClassBase: 2, CountClassCoupled: 9, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 9, CountDeclInstanceVariable: 5, CountDeclMethod: 9, CountDeclMethodAll: 11
CountLine: 221, CountLineBlank: 35, CountLineCode: 142, CountLineCodeDecl: 53, CountLineCodeExe: 108, CountLineComment: 44
CountStmt: 69, CountStmtDecl: 30, CountStmtExe: 59
MaxCyclomatic: 10, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 23
id: 5,708
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopLayerCrossAttention
from typing import Any, Optional, Union from ...utils.deprecation import deprecate_kwarg from torch import Tensor, nn class UdopLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): super().__init__() self.EncDecAttention = UdopAttention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = UdopLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] return outputs
class UdopLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 17, AvgCountLineBlank: 0, AvgCountLineCode: 17, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.03
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 36, CountLineBlank: 1, CountLineCode: 35, CountLineCodeDecl: 22, CountLineCodeExe: 20, CountLineComment: 1
CountStmt: 12, CountStmtDecl: 10, CountStmtExe: 9
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 5,709
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopLayerFF
from torch import Tensor, nn from transformers import UdopConfig class UdopLayerFF(nn.Module): def __init__(self, config: UdopConfig): super().__init__() if config.is_gated_act: self.DenseReluDense = UdopDenseGatedActDense(config) else: self.DenseReluDense = UdopDenseActDense(config) self.layer_norm = UdopLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states
class UdopLayerFF(nn.Module): def __init__(self, config: UdopConfig): pass def forward(self, hidden_states): pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 7, AvgCountLineBlank: 1, AvgCountLineCode: 7, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 16, CountLineBlank: 2, CountLineCode: 14, CountLineCodeDecl: 7, CountLineCodeExe: 11, CountLineComment: 0
CountStmt: 13, CountStmtDecl: 7, CountStmtExe: 10
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 5,710
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopLayerNorm
import torch from torch import Tensor, nn class UdopLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ Construct a layernorm module in the Udop style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states
class UdopLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): ''' Construct a layernorm module in the Udop style. No bias and no subtraction of mean. ''' pass def forward(self, hidden_states): pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 11, AvgCountLineBlank: 2, AvgCountLineCode: 5, AvgCountLineComment: 4, AvgCyclomatic: 2, CommentToCodeRatio: 0.73
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 23, CountLineBlank: 4, CountLineCode: 11, CountLineCodeDecl: 6, CountLineCodeExe: 8, CountLineComment: 8
CountStmt: 11, CountStmtDecl: 6, CountStmtExe: 8
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 5,711
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopLayerSelfAttention
from ...utils.deprecation import deprecate_kwarg from typing import Any, Optional, Union from torch import Tensor, nn class UdopLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.SelfAttention = UdopAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx) self.layer_norm = UdopLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs
class UdopLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 16, AvgCountLineBlank: 0, AvgCountLineCode: 16, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.03
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 34, CountLineBlank: 1, CountLineCode: 33, CountLineCodeDecl: 19, CountLineCodeExe: 20, CountLineComment: 1
CountStmt: 12, CountStmtDecl: 9, CountStmtExe: 9
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 5,712
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
class_name: transformers.models.udop.modeling_udop.UdopModel
from torch import Tensor, nn from copy import deepcopy import torch from transformers.modeling_outputs import Seq2SeqLMOutput, Seq2SeqModelOutput from typing import Any, Optional, Union from ...utils import ModelOutput, auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring class UdopModel(UdopPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'encoder.embed_patches.proj.weight', 'encoder.embed_patches.proj.bias', 'encoder.relative_bias.biases.0.relative_attention_bias.weight', 'decoder.relative_bias.biases.0.relative_attention_bias.weight'] def __init__(self, config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) self.patch_embed = UdopPatchEmbeddings(config) encoder_config = deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = UdopStack(encoder_config, self.shared, self.patch_embed) decoder_config = deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UdopStack(decoder_config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None, pixel_values: Optional[Tensor]=None, visual_bbox: Optional[dict[str, Any]]=None, decoder_input_ids: Optional[Tensor]=None, decoder_attention_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, encoder_outputs: Optional[Tensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[Tensor]=None, decoder_inputs_embeds: Optional[Tensor]=None, decoder_head_mask: Optional[Tensor]=None, cross_attn_head_mask: Optional[Tensor]=None, use_cache=True, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> tuple[Tensor, ...]: """ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoProcessor, AutoModel >>> from datasets import load_dataset >>> import torch >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = AutoModel.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> inputs = processor(image, words, boxes=boxes, return_tensors="pt") >>> decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]]) >>> # forward pass >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 1, 1024] ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, bbox=bbox, pixel_values=pixel_values, visual_bbox=visual_bbox, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = encoder_outputs[0] encoder_attention_mask = encoder_outputs.attention_mask if return_dict else encoder_outputs[1] decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: decoder_outputs = tuple((value for idx, value in enumerate(decoder_outputs) if idx != 1)) encoder_outputs = tuple((value for idx, value in enumerate(encoder_outputs) if 
idx != 1)) return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class UdopModel(UdopPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None, pixel_values: Optional[Tensor]=None, visual_bbox: Optional[dict[str, Any]]=None, decoder_input_ids: Optional[Tensor]=None, decoder_attention_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, encoder_outputs: Optional[Tensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[Tensor]=None, decoder_inputs_embeds: Optional[Tensor]=None, decoder_head_mask: Optional[Tensor]=None, cross_attn_head_mask: Optional[Tensor]=None, use_cache=True, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> tuple[Tensor, ...]: ''' bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
Example: ```python >>> from transformers import AutoProcessor, AutoModel >>> from datasets import load_dataset >>> import torch >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = AutoModel.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> inputs = processor(image, words, boxes=boxes, return_tensors="pt") >>> decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]]) >>> # forward pass >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 1, 1024] ```''' pass
8
1
23
3
16
5
2
0.31
1
10
2
0
6
4
6
8
157
21
104
40
74
32
40
17
33
6
2
1
11
5,713
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
transformers.models.udop.modeling_udop.UdopPatchEmbeddings
import collections from torch import Tensor, nn class UdopPatchEmbeddings(nn.Module): """2D Image to Patch Embeddings""" def __init__(self, config): super().__init__() image_size, patch_size = (config.image_size, config.patch_size) num_channels, hidden_size = (config.num_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.proj = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_channels, height, width = pixel_values.shape if height != self.image_size[0] or width != self.image_size[1]: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") embeddings = self.proj(pixel_values) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings
class UdopPatchEmbeddings(nn.Module): '''2D Image to Patch Embeddings''' def __init__(self, config): pass def forward(self, pixel_values): pass
3
1
12
1
11
0
3
0.04
1
2
0
0
2
5
2
12
28
4
23
13
20
1
20
13
17
3
1
1
5
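To see what `UdopPatchEmbeddings.forward` above produces, here is a small standalone sketch of the same Conv2d-based patching; the values 224/16/3/1024 are illustrative defaults, not read from a real `UdopConfig`.

```python
# Standalone sketch of the patch-embedding shapes computed by the class above.
# Values (224, 16, 3, 1024) are illustrative, not taken from a real UdopConfig.
import torch
from torch import nn

image_size, patch_size, num_channels, hidden_size = 224, 16, 3, 1024
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196

proj = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(2, num_channels, image_size, image_size)
embeddings = proj(pixel_values)                      # (2, 1024, 14, 14)
embeddings = embeddings.flatten(2).transpose(1, 2)   # (2, 196, 1024)

assert embeddings.shape == (2, num_patches, hidden_size)
```

Because the kernel size equals the stride, each 16x16 image patch maps to exactly one embedding vector, which is why the output sequence length is `num_patches`.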
5,714
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
transformers.models.udop.modeling_udop.UdopPreTrainedModel
import torch from torch import Tensor, nn from ...modeling_utils import PreTrainedModel from transformers import UdopConfig from ...utils import ModelOutput, auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling @auto_docstring class UdopPreTrainedModel(PreTrainedModel): config: UdopConfig base_model_prefix = 'transformer' supports_gradient_checkpointing = True _can_compile_fullgraph = False _keep_in_fp32_modules = ['wo'] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, UdopLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.Conv2d): module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=factor).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, RelativePositionBiasBase): factor = self.config.initializer_factor d_model = self.config.d_model module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) elif isinstance(module, UdopModel): module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, UdopForConditionalGeneration): if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings): module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, UdopDenseActDense): module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi, 'bias') and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, UdopDenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_0, 'bias') and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_1, 'bias') and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, UdopAttention): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, 'self.model.config.decoder_start_token_id has to be defined. In Udop it is usually set to the pad_token_id. 
See Udop docs for more information' shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, 'self.model.config.pad_token_id has to be defined.' shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), 'Verify that `shifted_input_ids` has only positive values' return shifted_input_ids
@auto_docstring class UdopPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass def _shift_right(self, input_ids): pass
4
1
41
3
32
7
10
0.25
1
7
7
4
2
0
2
2
96
8
71
16
68
18
58
16
55
19
1
2
20
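The `_shift_right` method above prepares decoder inputs from labels. A tiny sketch of the same shifting logic on a toy tensor follows; `pad_token_id = 0` and `decoder_start_token_id = 0` are assumed values mirroring the usual T5/UDOP defaults.

```python
# Sketch of the labels -> decoder_input_ids shift performed by _shift_right above.
# pad_token_id = 0 and decoder_start_token_id = 0 are assumed defaults here.
import torch

pad_token_id = 0
decoder_start_token_id = 0

labels = torch.tensor([[8774, 32099, 5, 1, -100, -100]])

shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()          # move every token one step right
shifted[..., 0] = decoder_start_token_id             # prepend the start token
shifted.masked_fill_(shifted == -100, pad_token_id)  # ignore-index -100 -> pad

print(shifted)  # tensor([[    0,  8774, 32099,     5,     1,     0]])
```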
5,715
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
transformers.models.udop.modeling_udop.UdopStack
from transformers import UdopConfig from ...modeling_attn_mask_utils import AttentionMaskConverter from ...utils import ModelOutput, auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Any, Optional, Union import torch from torch import Tensor, nn class UdopStack(UdopPreTrainedModel): """ This class is based on `T5Stack`, but modified to take into account the image modality as well as 2D position embeddings. """ def __init__(self, config, embed_tokens=None, embed_patches=None): super().__init__(config) self.embed_tokens = embed_tokens self.embed_patches = embed_patches self.is_decoder = config.is_decoder self._max_length = config.max_length self.num_layers = config.num_layers self.block = nn.ModuleList([UdopBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(self.num_layers)]) self.final_layer_norm = UdopLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) if not self.is_decoder: self.cell_2d_embedding = UdopCellEmbeddings(config.max_2d_position_embeddings, config.hidden_size) self.relative_bias = self._get_relative_bias(config) def _tie_weights(self): for bias in self.relative_bias.biases: if isinstance(bias, RelativePositionBias1D): self._tie_or_clone_weights(bias.relative_attention_bias, self.block[0].layer[0].SelfAttention.relative_attention_bias) @staticmethod def _get_relative_bias(config: UdopConfig) -> RelativePositionBiasAggregated: relative_bias_list = create_relative_bias(config) return RelativePositionBiasAggregated(relative_bias_list) def get_output_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward(self, input_ids=None, attention_mask=None, bbox=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, pixel_values=None, visual_bbox=None, image_embeddings=None, position_bias=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time') elif input_ids is not None and torch.numel(input_ids) > 0: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is None and input_ids is not None and (torch.numel(input_ids) == 0): input_ids = torch.full((4, 1024), self.config.pad_token_id, device=input_ids.device, dtype=input_ids.dtype) attention_mask = torch.zeros((4, 1024), device=input_ids.device, dtype=input_ids.dtype) bbox = torch.zeros((4, 1024, 4), device=input_ids.device, dtype=input_ids.dtype) input_shape = input_ids.size() position_bias = torch.zeros_like(self.get_extended_attention_mask(attention_mask, input_shape)) logger.warning('Empty batch') elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: 
err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds') if inputs_embeds is None: if self.embed_tokens is None: raise ValueError('You have to initialize the model with valid token embeddings') inputs_embeds = self.embed_tokens(input_ids) if pixel_values is not None: image_embeddings = self.embed_patches(pixel_values) if image_embeddings is not None: num_patches = self.config.image_size // self.config.patch_size inputs_embeds, bbox, attention_mask = combine_image_text_embeddings(image_embeddings, inputs_embeds, bbox, visual_bbox, attention_mask, num_patches, 0, self.config.image_size, self.config.patch_size) input_shape = inputs_embeds.size()[:-1] if not self.is_decoder and bbox is not None: inputs_embeds += self.cell_2d_embedding(bbox) batch_size, seq_length = input_shape if use_cache is True: assert self.is_decoder, f'`use_cache` can only be set to `True` if {self} is used as a decoder' if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min if self.is_decoder and encoder_attention_mask is not None: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None if self.is_decoder: position_bias = None else: position_bias = self.relative_bias(attention_mask=attention_mask, bbox=bbox) position_bias = position_bias + causal_mask encoder_decoder_position_bias = None hidden_states = inputs_embeds hidden_states = self.dropout(hidden_states) for i, layer_module in enumerate(self.block): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=head_mask[i], past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = layer_outputs[0] position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if 
output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, attention_mask, past_key_values, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithAttentionMask(last_hidden_state=hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. 
cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
class UdopStack(UdopPreTrainedModel): ''' This class is based on `T5Stack`, but modified to take into account the image modality as well as 2D position embeddings. ''' def __init__(self, config, embed_tokens=None, embed_patches=None): pass def _tie_weights(self): pass @staticmethod def _get_relative_bias(config: UdopConfig) -> RelativePositionBiasAggregated: pass def get_output_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def forward(self, input_ids=None, attention_mask=None, bbox=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, pixel_values=None, visual_bbox=None, image_embeddings=None, position_bias=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
11
2
42
4
32
5
7
0.19
1
20
11
0
7
10
9
11
395
49
295
90
247
55
159
52
149
44
2
3
66
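The static `_prepare_4d_causal_attention_mask_with_cache_position` above expands a 2D padding mask into an additive 4D `(batch, 1, query_length, key_value_length)` mask. Below is a toy sketch of the same construction with tiny sizes, assuming no cache so that `target_length == sequence_length` (which also makes the `cache_position` comparison equivalent to the upper-triangular mask).

```python
# Toy sketch of the 4D causal mask construction shown above, with no cache so
# target_length == sequence_length. Masked positions receive dtype.min, kept
# positions receive 0, ready to be added to attention scores.
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
batch_size, sequence_length = 1, 4
attention_mask = torch.tensor([[1, 1, 1, 0]])  # last key position is padding

causal_mask = torch.full((sequence_length, sequence_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)  # future positions get dtype.min
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Fold in the padding mask: positions where both masks are "blocked" sum to 0.
padding_mask = causal_mask + attention_mask[:, None, None, :].to(dtype)
causal_mask = causal_mask.masked_fill(padding_mask == 0, min_dtype)

print(causal_mask.shape)              # torch.Size([1, 1, 4, 4])
print((causal_mask == 0).squeeze())   # True where a query may attend to a key
```

The final boolean printout is lower-triangular with the last key column blocked, i.e. each query attends only to non-padded past and current positions.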
5,716
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/processing_udop.py
transformers.models.udop.processing_udop.UdopProcessor
from ...image_processing_utils import BatchFeature from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...image_utils import ImageInput from typing import Optional, Union class UdopProcessor(ProcessorMixin): """ Constructs a UDOP processor which combines a LayoutLMv3 image processor and a UDOP tokenizer into a single processor. [`UdopProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`LayoutLMv3ImageProcessor`] to resize, rescale and normalize document images, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to [`UdopTokenizer`] or [`UdopTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token classification tasks (such as FUNSD, CORD). Additionally, it also supports passing `text_target` and `text_pair_target` to the tokenizer, which can be used to prepare labels for language modeling tasks. Args: image_processor (`LayoutLMv3ImageProcessor`): An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input. tokenizer (`UdopTokenizer` or `UdopTokenizerFast`): An instance of [`UdopTokenizer`] or [`UdopTokenizerFast`]. The tokenizer is a required input. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = 'LayoutLMv3ImageProcessor' tokenizer_class = ('UdopTokenizer', 'UdopTokenizerFast') def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[UdopProcessorKwargs]) -> BatchFeature: """ This method first forwards the `images` argument to [`~UdopImageProcessor.__call__`]. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and bounding boxes along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `False`, it passes the words (`text`/``text_pair`) and `boxes` specified by the user along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. Alternatively, one can pass `text_target` and `text_pair_target` to prepare the targets of UDOP. Please refer to the docstring of the above two methods for more information. 
""" output_kwargs = self._merge_kwargs(UdopProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) boxes = output_kwargs['text_kwargs'].pop('boxes', None) word_labels = output_kwargs['text_kwargs'].pop('word_labels', None) text_pair = output_kwargs['text_kwargs'].pop('text_pair', None) return_overflowing_tokens = output_kwargs['text_kwargs'].get('return_overflowing_tokens', False) return_offsets_mapping = output_kwargs['text_kwargs'].get('return_offsets_mapping', False) text_target = output_kwargs['text_kwargs'].get('text_target', None) if self.image_processor.apply_ocr and boxes is not None: raise ValueError('You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.') if self.image_processor.apply_ocr and word_labels is not None: raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.') if return_overflowing_tokens and (not return_offsets_mapping): raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.') if text_target is not None: return self.tokenizer(**output_kwargs['text_kwargs']) else: features = self.image_processor(images=images, **output_kwargs['images_kwargs']) features_words = features.pop('words', None) features_boxes = features.pop('boxes', None) output_kwargs['text_kwargs'].pop('text_target', None) output_kwargs['text_kwargs'].pop('text_pair_target', None) output_kwargs['text_kwargs']['text_pair'] = text_pair output_kwargs['text_kwargs']['boxes'] = boxes if boxes is not None else features_boxes output_kwargs['text_kwargs']['word_labels'] = word_labels if text is not None and self.image_processor.apply_ocr and (text_pair is None): if isinstance(text, str): text = [text] output_kwargs['text_kwargs']['text_pair'] = features_words encoded_inputs = self.tokenizer(text=text if text is not None else features_words, **output_kwargs['text_kwargs']) if return_overflowing_tokens is True: features['pixel_values'] = self.get_overflowing_images(features['pixel_values'], encoded_inputs['overflow_to_sample_mapping']) features.update(encoded_inputs) return features def get_overflowing_images(self, images, overflow_to_sample_mapping): images_with_overflow = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(images_with_overflow) != len(overflow_to_sample_mapping): raise ValueError(f'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}') return images_with_overflow @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(tokenizer_input_names + image_processor_input_names + ['bbox'])
class UdopProcessor(ProcessorMixin): ''' Constructs a UDOP processor which combines a LayoutLMv3 image processor and a UDOP tokenizer into a single processor. [`UdopProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`LayoutLMv3ImageProcessor`] to resize, rescale and normalize document images, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to [`UdopTokenizer`] or [`UdopTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token classification tasks (such as FUNSD, CORD). Additionally, it also supports passing `text_target` and `text_pair_target` to the tokenizer, which can be used to prepare labels for language modeling tasks. Args: image_processor (`LayoutLMv3ImageProcessor`): An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input. tokenizer (`UdopTokenizer` or `UdopTokenizerFast`): An instance of [`UdopTokenizer`] or [`UdopTokenizerFast`]. The tokenizer is a required input. ''' def __init__(self, image_processor, tokenizer): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[UdopProcessorKwargs]) -> BatchFeature: ''' This method first forwards the `images` argument to [`~UdopImageProcessor.__call__`]. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and bounding boxes along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `False`, it passes the words (`text`/``text_pair`) and `boxes` specified by the user along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. Alternatively, one can pass `text_target` and `text_pair_target` to prepare the targets of UDOP. Please refer to the docstring of the above two methods for more information. ''' pass def get_overflowing_images(self, images, overflow_to_sample_mapping): pass @property def model_input_names(self): pass
6
2
20
3
13
5
3
0.63
1
5
2
0
6
0
6
23
159
26
82
33
66
52
53
24
46
10
2
3
17
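The processor record above describes the two OCR modes. A hedged usage sketch for the `apply_ocr=False` path follows, reusing the `microsoft/udop-large` checkpoint name from the model example earlier in this file; the image, words, and boxes are made-up stand-ins.

```python
# Sketch of the apply_ocr=False path described in the UdopProcessor docstring:
# the caller supplies words plus 0-1000 normalized boxes, the image processor
# only prepares pixel_values, and the tokenizer aligns boxes to tokens.
# The checkpoint name follows the model example above; words/boxes are made up.
from PIL import Image
from transformers import UdopProcessor

processor = UdopProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)

image = Image.new("RGB", (762, 1000), color="white")  # stand-in document image
words = ["Invoice", "Total:", "42.00"]
boxes = [[62, 84, 292, 128], [300, 84, 420, 128], [430, 84, 520, 128]]

inputs = processor(image, words, boxes=boxes, return_tensors="pt")
print(inputs.keys())  # expected: input_ids, attention_mask, bbox, pixel_values
```

With `apply_ocr=True` instead, `words` and `boxes` must be omitted (the processor raises a `ValueError` otherwise, as shown in `__call__` above) and the OCR results are used in their place.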
5,717
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/processing_udop.py
transformers.models.udop.processing_udop.UdopProcessorKwargs
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack class UdopProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: UdopTextKwargs _defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'truncation': False, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_length': False, 'verbose': True}, 'images_kwargs': {}}
class UdopProcessorKwargs(ProcessingKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
16
0
16
2
15
0
3
2
2
0
3
0
0
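The `_defaults` above seed the tokenizer kwargs when the processor is called. As a plain-Python sketch of the intent only, not the actual `ProcessorMixin._merge_kwargs` implementation, caller-supplied kwargs win over these defaults:

```python
# Plain-dict sketch of defaults-then-caller-override merging; this shows only
# the intent of the ProcessingKwargs defaults, not the real _merge_kwargs code.
defaults = {"add_special_tokens": True, "padding": False, "truncation": False, "stride": 0}
caller_kwargs = {"padding": "max_length", "max_length": 512}

text_kwargs = {**defaults, **caller_kwargs}
print(text_kwargs["padding"], text_kwargs["add_special_tokens"])  # max_length True
```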
5,718
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/processing_udop.py
transformers.models.udop.processing_udop.UdopTextKwargs
from typing import Optional, Union from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack class UdopTextKwargs(TextKwargs, total=False): word_labels: Optional[Union[list[int], list[list[int]]]] boxes: Union[list[list[int]], list[list[list[int]]]]
class UdopTextKwargs(TextKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
3
0
3
1
2
0
3
1
2
0
2
0
0
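The two extra text kwargs above carry word-level supervision for the tokenizer. A tiny sketch of what well-typed values look like for a single example versus a batch; the concrete numbers are made up.

```python
# Illustrative values matching the UdopTextKwargs annotations above (made up).
single_example = {
    "boxes": [[62, 84, 292, 128], [300, 84, 420, 128]],   # list[list[int]]
    "word_labels": [0, 1],                                  # list[int]
}
batch = {
    "boxes": [[[62, 84, 292, 128]], [[10, 10, 50, 30]]],    # list[list[list[int]]]
    "word_labels": [[0], [3]],                               # list[list[int]]
}
```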
5,719
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/tokenization_udop.py
transformers.models.udop.tokenization_udop.UdopTokenizer
import warnings import sentencepiece as spm import os from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy from ...tokenization_utils import PreTrainedTokenizer from shutil import copyfile from ...utils.import_utils import requires from typing import Any, Optional, Union import re from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging @requires(backends=('sentencepiece',)) class UdopTokenizer(PreTrainedTokenizer): """ Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. legacy (`bool`, *optional*, defaults to `True`): Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622 which includes fixes to properly handle tokens that appear after special tokens. 
A simple example: - `legacy=True`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True) >>> tokenizer.encode("Hello <extra_id_0>.") [8774, 32099, 3, 5, 1] ``` - `legacy=False`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False) >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here [8774, 32099, 5, 1] ``` Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', sep_token='</s>', pad_token='<pad>', sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, additional_special_tokens=None, sp_model_kwargs: Optional[dict[str, Any]]=None, legacy=True, add_prefix_space=True, **kwargs) -> None: eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token self.legacy = legacy self.add_prefix_space = add_prefix_space self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword super().__init__(eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, add_prefix_space=add_prefix_space, **kwargs) @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [0] * len(token_ids_0) + [1] return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def get_sentinel_tokens(self): return list(set(filter(lambda x: bool(re.search('<extra_id_\\d+>', x)) is not None, self.additional_special_tokens))) def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn(f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.') return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__.update(d) self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def tokenize(self, text: 'TextInput', **kwargs) -> list[str]: """ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the first token is special. """ if self.legacy or len(text) == 0: return super().tokenize(text, **kwargs) text = text.replace(SPIECE_UNDERLINE, ' ') if self.add_prefix_space: text = SPIECE_UNDERLINE + text tokens = super().tokenize(text, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and (tokens[1] in self.all_special_tokens): tokens = tokens[1:] return tokens def _tokenize(self, text, **kwargs): """ Returns a tokenized string. We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. 
Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ if self.legacy or not text.startswith((SPIECE_UNDERLINE, ' ')): return self.sp_model.encode(text, out_type=str) tokens = self.sp_model.encode(self.unk_token + text, out_type=str) return tokens[self.unk_token_length:] if len(tokens) >= self.unk_token_length else tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space: tokens[0] = tokens[0][1:] current_sub_tokens = [] out_string = '' prev_is_special = False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, **kwargs) -> BatchEncoding: if text is None and text_target is None: raise ValueError('You need to specify either `text` or `text_target`.') if text is not None: if not self._in_target_context_manager: self._switch_to_input_mode() encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs) if text_target is not None: self._switch_to_target_mode() target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **kwargs) self._switch_to_input_mode() if text_target is None: return encodings elif text is None: return target_encodings else: encodings['labels'] = target_encodings['input_ids'] return encodings def call_boxes(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: 
Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`list[list[int]]`, `list[list[list[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`list[int]`, `list[list[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ def _is_valid_text_input(t): if isinstance(t, str): return True elif isinstance(t, (list, tuple)): if len(t) == 0: return True elif isinstance(t[0], str): return True elif isinstance(t[0], (list, tuple)): return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: if not _is_valid_text_input(text): raise ValueError('text input must of type `str` (single example) or `list[str]` (batch of examples). 
') if not isinstance(text_pair, (list, tuple)): raise ValueError('words must of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).') elif not isinstance(text, (list, tuple)): raise ValueError('Words must of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).') if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError('You must provide corresponding bounding boxes') if is_batched: if len(words) != len(boxes): raise ValueError('You must provide words and boxes for an equal amount of examples') for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError('You must provide as many words as there are bounding boxes') elif len(words) != len(boxes): raise ValueError('You must provide as many words as there are bounding boxes') if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.') batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: 
""" Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. Args: batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). """ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def encode_boxes(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]: """ Args: Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. text (`str`, `list[str]` or `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). 
""" encoded_inputs = self.encode_plus_boxes(text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, return_tensors=return_tensors, **kwargs) return encoded_inputs['input_ids'] def encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). 
""" padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. 
To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.') batch_outputs = self._batch_prepare_for_model_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose) return BatchEncoding(batch_outputs) @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def _batch_prepare_for_model_boxes(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model_boxes(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def _encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: 
TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674') return self.prepare_for_model_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose) @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def prepare_for_model_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `list[str]`, `list[list[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: for word, box in zip(text, boxes): if len(word) < 1: continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length): ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels = self.truncate_sequences(ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride) if return_token_type_ids and (not add_special_tokens): raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. 
Please set add_special_tokens to True or set return_token_type_ids to None.') if return_token_type_ids is None: return_token_type_ids = 'token_type_ids' in self.model_input_names if return_attention_mask is None: return_attention_mask = 'attention_mask' in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs['overflowing_tokens'] = overflowing_tokens encoded_inputs['overflowing_token_boxes'] = overflowing_token_boxes encoded_inputs['overflowing_labels'] = overflowing_labels encoded_inputs['num_truncated_tokens'] = total_len - max_length if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) encoded_inputs['input_ids'] = sequence encoded_inputs['bbox'] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs['token_type_ids'] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs['special_tokens_mask'] = [0] * len(sequence) if labels: encoded_inputs['labels'] = labels self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose) if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask) if return_length: encoded_inputs['length'] = len(encoded_inputs['input_ids']) batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis) return batch_outputs def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`list[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`list[list[int]]`): Bounding boxes of the first sequence. pair_ids (`list[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`list[list[int]]`, *optional*): Bounding boxes of the second sequence. labels (`list[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return (ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []) if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) overflowing_token_boxes.extend(token_boxes[-window_len:]) overflowing_labels.extend(labels[-window_len:]) ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) overflowing_token_boxes.extend(pair_token_boxes[-window_len:]) pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a length {len(ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'.") elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a length {len(pair_ids)}. 
Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.") return (ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels) def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ if return_attention_mask is None: return_attention_mask = 'attention_mask' in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length if return_attention_mask and 'attention_mask' not in encoded_inputs: encoded_inputs['attention_mask'] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == 'right': if return_attention_mask: encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference if 'token_type_ids' in encoded_inputs: encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference if 'bbox' in encoded_inputs: encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference if 'labels' in encoded_inputs: encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference if 'special_tokens_mask' in encoded_inputs: encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == 'left': if return_attention_mask: encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask'] if 'token_type_ids' in 
encoded_inputs: encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids'] if 'bbox' in encoded_inputs: encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox'] if 'labels' in encoded_inputs: encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels'] if 'special_tokens_mask' in encoded_inputs: encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask'] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError('Invalid padding strategy:' + str(padding_side)) return encoded_inputs
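To make the padding behaviour implemented in `_pad` above easier to follow, here is a minimal, self-contained sketch of the same right-side padding applied to the UDOP-specific keys (`bbox` and `labels`). It is an illustrative re-implementation, not the library code itself; the defaults `pad_token_id=0`, `pad_token_box=[0, 0, 0, 0]` and `pad_token_label=-100` are taken from (or assumed consistent with) the docstrings above, and the example token ids are arbitrary.

```python
# Illustrative sketch of the right-side padding described in `_pad` above.
# Assumed defaults (hedged): pad_token_id=0, pad_token_box=[0, 0, 0, 0], pad_token_label=-100.

def pad_right(encoded, max_length, pad_token_id=0,
              pad_token_box=(0, 0, 0, 0), pad_token_label=-100):
    """Pad `input_ids`, `attention_mask`, `bbox` and `labels` to `max_length` on the right."""
    difference = max_length - len(encoded["input_ids"])
    if difference <= 0:
        return encoded
    # Attention mask: 1 for real tokens, 0 for padding positions.
    encoded["attention_mask"] = (
        encoded.get("attention_mask", [1] * len(encoded["input_ids"])) + [0] * difference
    )
    # Bounding boxes and labels are padded with their dedicated padding values.
    encoded["bbox"] = encoded["bbox"] + [list(pad_token_box)] * difference
    if "labels" in encoded:
        encoded["labels"] = encoded["labels"] + [pad_token_label] * difference
    encoded["input_ids"] = encoded["input_ids"] + [pad_token_id] * difference
    return encoded


# Arbitrary, purely illustrative inputs (not real decoded tokens).
example = {
    "input_ids": [8774, 296, 1],
    "bbox": [[10, 10, 50, 20], [55, 10, 90, 20], [1000, 1000, 1000, 1000]],
    "labels": [3, 7, -100],
}
padded = pad_right(example, max_length=6)
# input_ids -> [8774, 296, 1, 0, 0, 0]
# bbox      -> original boxes followed by three [0, 0, 0, 0] entries
# labels    -> [3, 7, -100, -100, -100, -100]
```

Left-side padding (`padding_side="left"`) mirrors this by prepending the same padding values instead of appending them, as in the `elif padding_side == 'left'` branch above.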
@requires(backends=('sentencepiece',)) class UdopTokenizer(PreTrainedTokenizer): ''' Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. legacy (`bool`, *optional*, defaults to `True`): Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622 which includes fixes to properly handle tokens that appear after special tokens. 
A simple example: - `legacy=True`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True) >>> tokenizer.encode("Hello <extra_id_0>.") [8774, 32099, 3, 5, 1] ``` - `legacy=False`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False) >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here [8774, 32099, 5, 1] ``` Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). ''' def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', sep_token='</s>', pad_token='<pad>', sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, additional_special_tokens=None, sp_model_kwargs: Optional[dict[str, Any]]=None, legacy=True, add_prefix_space=True, **kwargs) -> None: pass @property def vocab_size(self): pass def get_vocab(self): pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def get_sentinel_tokens(self): pass def get_sentinel_token_ids(self): pass def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]: '''Do not add eos again if user already added it.''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
''' pass def __getstate__(self): pass def __setstate__(self, d): pass def tokenize(self, text: 'TextInput', **kwargs) -> list[str]: ''' Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the first token is special. ''' pass def _tokenize(self, text, **kwargs): ''' Returns a tokenized string. We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. ''' pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, **kwargs) -> BatchEncoding: pass def call_boxes(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`list[list[int]]`, `list[list[list[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. 
word_labels (`list[int]`, `list[list[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). ''' pass def _is_valid_text_input(t): pass def batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. Args: batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). ''' pass def encode_boxes(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]: ''' Args: Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. text (`str`, `list[str]` or `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). 
''' pass def encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Tokenize and prepare for the model a sequence or a pair of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). ''' pass def _batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: pass @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def _batch_prepare_for_model_boxes(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding: ''' Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. 
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs ''' pass def _encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: pass @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def prepare_for_model_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding: ''' Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `list[str]`, `list[list[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). ''' pass def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]: ''' Truncates a sequence pair in-place following the strategy. Args: ids (`list[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`list[list[int]]`): Bounding boxes of the first sequence. 
pair_ids (`list[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`list[list[int]]`, *optional*): Bounding boxes of the second sequence. labels (`list[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. ''' pass def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict: ''' Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) ''' pass
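The skeleton above shows the box-aware entry points (`__call__`, `call_boxes`, `prepare_for_model_boxes`). The snippet below is a hedged usage sketch of the word-to-token expansion they describe: word-level `boxes` are repeated for every sub-token and, with `only_label_first_subword=True`, only the first sub-token of each word keeps the word label while the remaining sub-tokens receive -100. The checkpoint name `microsoft/udop-large` is an assumption used purely for illustration; any UDOP checkpoint with a SentencePiece vocabulary should behave the same way, and the printed shapes assume the example lengths chosen here.

```python
# Hedged usage sketch; "microsoft/udop-large" is an assumed checkpoint name.
from transformers import UdopTokenizer

tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")

words = ["Invoice", "number:", "12345"]
# Word-level boxes, normalized to a 0-1000 scale as required by the docstrings above.
boxes = [[48, 60, 130, 78], [134, 60, 220, 78], [226, 60, 290, 78]]
word_labels = [0, 0, 1]  # e.g. token-classification labels, one per word

encoding = tokenizer(
    text=words,
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    max_length=16,
    truncation=True,
    return_tensors="pt",
)

# Each word's box is copied to all of its sub-tokens; the sequence-final special token
# gets sep_token_box ([1000, 1000, 1000, 1000]) and padding positions get pad_token_box.
print(encoding["input_ids"].shape)  # (1, 16)
print(encoding["bbox"].shape)       # (1, 16, 4)
print(encoding["labels"])           # word labels on first sub-tokens, -100 elsewhere
```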
35
18
42
3
31
8
5
0.35
1
17
2
0
28
9
28
117
1,336
131
896
331
635
317
355
98
325
29
3
4
143
5,720
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/tokenization_udop_fast.py
transformers.models.udop.tokenization_udop_fast.UdopTokenizerFast
import os from ...tokenization_utils_base import BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy from ...tokenization_utils_fast import PreTrainedTokenizerFast from shutil import copyfile from typing import Optional, Union from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging class UdopTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" UDOP tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. tokenizer_file (`str`, *optional*): Path to the tokenizer file. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = UdopTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', sep_token='</s>', unk_token='<unk>', pad_token='<pad>', sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, additional_special_tokens=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, additional_special_tokens=additional_special_tokens, **kwargs) self.vocab_file = vocab_file self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, **kwargs) -> BatchEncoding: if text is None and text_target is None: raise ValueError('You need to specify either `text` or `text_target`.') if text is not None: if not self._in_target_context_manager: self._switch_to_input_mode() encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs) if text_target is not None: self._switch_to_target_mode() target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **kwargs) self._switch_to_input_mode() if text_target is None: return encodings elif text is None: return target_encodings else: encodings['labels'] = target_encodings['input_ids'] return encodings @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def call_boxes(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. 
Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`list[list[int]]`, `list[list[list[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`list[int]`, `list[list[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ def _is_valid_text_input(t): if isinstance(t, str): return True elif isinstance(t, (list, tuple)): if len(t) == 0: return True elif isinstance(t[0], str): return True elif isinstance(t[0], (list, tuple)): return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: if not _is_valid_text_input(text): raise ValueError('text input must of type `str` (single example) or `list[str]` (batch of examples). ') if not isinstance(text_pair, (list, tuple)): raise ValueError('words must of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).') elif not isinstance(text, (list, tuple)): raise ValueError('Words must of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).') if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError('You must provide corresponding bounding boxes') if is_batched: if len(words) != len(boxes): raise ValueError('You must provide words and boxes for an equal amount of examples') for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError('You must provide as many words as there are bounding boxes') elif len(words) != len(boxes): raise ValueError('You must provide as many words as there are bounding boxes') if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.') batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, 
return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]: batched_input = [(text, pair)] if pair else [text] self._tokenizer.encode_special_tokens = kwargs.pop('split_special_tokens', self._tokenizer.encode_special_tokens) encodings = self._tokenizer.encode_batch(batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs) return encodings[0].tokens def batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). 
""" padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})') self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True) tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings] sanitized_tokens = {} for key in tokens_and_encodings[0][0]: stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks['input_ids']) sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping for input_ids in sanitized_tokens['input_ids']: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) token_boxes = [] for batch_index in range(len(sanitized_tokens['input_ids'])): if 
return_overflowing_tokens: original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError('Id not recognized') token_boxes.append(token_boxes_example) sanitized_tokens['bbox'] = token_boxes if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens['input_ids'])): if return_overflowing_tokens: original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index] else: original_index = batch_index labels_example = [] previous_token_empty = False for id, offset, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0 and (not previous_token_empty): labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) else: labels_example.append(word_labels[original_index][word_id]) if self.decode(id) == '': previous_token_empty = True else: previous_token_empty = False else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens['labels'] = labels if not return_offsets_mapping: del sanitized_tokens['offset_mapping'] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus_boxes(batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) if 
return_tensors is None and (not return_overflowing_tokens): batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings) self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose) return batched_output def encode_boxes(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]: """ Args: Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. text (`str`, `list[str]` or `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). """ encoded_inputs = self.encode_plus_boxes(text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, return_tensors=return_tensors, **kwargs) return encoded_inputs['input_ids'] def encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). 
""" padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ if return_attention_mask is None: return_attention_mask = 'attention_mask' in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length if return_attention_mask and 'attention_mask' not in encoded_inputs: encoded_inputs['attention_mask'] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == 'right': if return_attention_mask: encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference if 'token_type_ids' in encoded_inputs: encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference if 'bbox' in encoded_inputs: encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference if 'labels' in encoded_inputs: encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference if 'special_tokens_mask' in encoded_inputs: encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == 'left': if return_attention_mask: encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask'] if 'token_type_ids' in encoded_inputs: encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids'] if 'bbox' in encoded_inputs: encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox'] if 'labels' in encoded_inputs: encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels'] if 'special_tokens_mask' in encoded_inputs: encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask'] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError('Invalid padding strategy:' + str(padding_side)) return encoded_inputs def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
class UdopTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" UDOP tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. tokenizer_file (`str`, *optional*): Path to the tokenizer file. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. 
''' def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', sep_token='</s>', unk_token='<unk>', pad_token='<pad>', sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, additional_special_tokens=None, **kwargs): pass @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, **kwargs) -> BatchEncoding: pass @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def call_boxes(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`list[list[int]]`, `list[list[list[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`list[int]`, `list[list[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). 
''' pass def _is_valid_text_input(t): pass def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]: pass def batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). ''' pass def _batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: pass def _encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: pass def encode_boxes(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, 
add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]: ''' Args: Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. text (`str`, `list[str]` or `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). ''' pass def encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Tokenize and prepare for the model a sequence or a pair of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). ''' pass def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict: ''' Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. 
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) ''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
17
8
55
4
41
10
6
0.3
1
15
2
0
14
5
14
102
881
84
613
237
424
186
210
64
194
24
3
6
94
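The `_pad` method in the UdopTokenizerFast record above pads `bbox` and `labels` alongside `input_ids` and `attention_mask`. Below is a minimal, self-contained sketch of the right-padding case only; `PAD_ID` is a hypothetical pad id, while `PAD_BOX` and `PAD_LABEL` reuse the defaults stated in the class docstring. It illustrates the behaviour and is not the tokenizer code itself.

```python
# Illustrative right-padding of one encoded example, mirroring the logic
# described in UdopTokenizerFast._pad (values below are assumptions).
PAD_ID = 0               # hypothetical pad_token_id
PAD_BOX = [0, 0, 0, 0]   # pad_token_box default from the docstring
PAD_LABEL = -100         # pad_token_label default from the docstring

def pad_right(encoded: dict, max_length: int) -> dict:
    """Pad input_ids, attention_mask, bbox and labels up to max_length."""
    difference = max_length - len(encoded["input_ids"])
    if difference <= 0:
        return encoded
    encoded["attention_mask"] = encoded["attention_mask"] + [0] * difference
    encoded["bbox"] = encoded["bbox"] + [PAD_BOX] * difference
    encoded["labels"] = encoded["labels"] + [PAD_LABEL] * difference
    encoded["input_ids"] = encoded["input_ids"] + [PAD_ID] * difference
    return encoded

example = {
    "input_ids": [100, 200, 300],
    "attention_mask": [1, 1, 1],
    "bbox": [[1, 2, 3, 4]] * 3,
    "labels": [5, 6, 7],
}
padded = pad_right(example, max_length=5)
assert len(padded["input_ids"]) == len(padded["bbox"]) == 5
```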
5,721
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/configuration_umt5.py
transformers.models.umt5.configuration_umt5.UMT5Config
from ...configuration_utils import PretrainedConfig class UMT5Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`UMT5Model`]. It is used to instantiate a UMT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UMT5 [google/umt5-small](https://huggingface.co/google/umt5-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 250112): Vocabulary size of the UMT5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UMT5Model`] or [`TFUMT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `UMT5Block`. num_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" model_type = 'umt5' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', 'head_dim': 'd_kv'} def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, tokenizer_class='T5Tokenizer', tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, classifier_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.classifier_dropout = classifier_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache act_info = self.feed_forward_proj.split('-') self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == 'gated' if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2: raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'") if feed_forward_proj == 'gated-gelu': self.dense_act_fn = 'gelu_new' super().__init__(is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
class UMT5Config(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`UMT5Model`]. It is used to instantiate a UMT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UMT5 [google/umt5-small](https://huggingface.co/google/umt5-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 250112): Vocabulary size of the UMT5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UMT5Model`] or [`TFUMT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `UMT5Block`. num_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). ''' def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, tokenizer_class='T5Tokenizer', tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, classifier_dropout=0.0, **kwargs): pass
2
1
66
4
62
1
4
0.61
1
2
0
0
1
17
1
1
121
8
71
47
45
43
28
23
26
4
1
1
4
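`UMT5Config.__init__` above derives `dense_act_fn` and `is_gated_act` from the `feed_forward_proj` string. A standalone sketch of that parsing rule (no transformers dependency; the helper name is made up for illustration):

```python
def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    """Return (dense_act_fn, is_gated_act) following the rules in UMT5Config.__init__."""
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
        raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # special-cased exactly as in the config class
    return dense_act_fn, is_gated_act

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
```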
5,722
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/configuration_umt5.py
transformers.models.umt5.configuration_umt5.UMT5OnnxConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from collections.abc import Mapping

class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}}
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 0.0005
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    @property
    def default_onnx_opset(self) -> int:
        pass

    @property
    def atol_for_validation(self) -> float:
        pass
7
0
7
1
6
0
2
0.09
1
3
0
0
3
0
3
3
29
4
23
8
16
2
16
5
12
3
1
1
5
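The `inputs` property above switches the dynamic ONNX axes depending on `use_past`. A plain-function sketch of that mapping, with a hypothetical helper name and without the `past_key_values` entries that `fill_with_past_key_values_` would add:

```python
def build_onnx_input_axes(use_past: bool) -> dict:
    """Rebuild the dynamic-axes dict from UMT5OnnxConfig.inputs (cache entries omitted)."""
    axes = {
        "input_ids": {0: "batch", 1: "encoder_sequence"},
        "attention_mask": {0: "batch", 1: "encoder_sequence"},
    }
    if use_past:
        # Axis names change when a key/value cache is used during export.
        axes["attention_mask"][1] = "past_encoder_sequence + sequence"
        axes["decoder_input_ids"] = {0: "batch"}
        axes["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
    else:
        axes["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        axes["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
    return axes

print(build_onnx_input_axes(use_past=False)["decoder_input_ids"])  # {0: 'batch', 1: 'decoder_sequence'}
```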
5,723
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5Attention
from typing import Optional, Union from ...utils.deprecation import deprecate_kwarg import math from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache class UMT5Attention(nn.Module): """ T5's attention using relative_attention_bias. """ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() def _shape(self, projection: torch.Tensor) -> torch.Tensor: new_projection_shape = projection.size()[:-1] + (self.n_heads, self.key_value_proj_dim) new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) return new_projection def _relative_position_bucket(self, relative_position): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 num_buckets = self.relative_attention_num_buckets max_distance = self.relative_attention_max_distance if not self.is_decoder: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact log_ratio = torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) log_ratio = log_ratio * (num_buckets - max_exact) relative_position_if_large = max_exact + log_ratio.to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket(relative_position) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) return values @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None): batch_size, seq_length = hidden_states.shape[:2] is_cross_attention = encoder_hidden_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) is_updated = False if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = encoder_hidden_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: cache_position 
= cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True scores = torch.matmul(query_states, key_states.transpose(3, 2)) real_seq_length = seq_length + past_key_values.get_seq_length() if past_key_values is not None else seq_length key_length = key_states.shape[-2] if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype) else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position) position_bias = position_bias[:, :, -seq_length:, :] if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, seq_length, -1) attn_output = self.o(attn_output) return (attn_output, attn_weights)
class UMT5Attention(nn.Module): ''' T5's attention using relative_attention_bias. ''' def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def _shape(self, projection: torch.Tensor) -> torch.Tensor: pass def _relative_position_bucket(self, relative_position): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, query_length, key_length, device=None, cache_position=None): '''Compute binned relative position bias''' pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None): pass
7
3
37
5
26
7
4
0.29
1
5
0
0
5
16
5
15
192
28
129
61
115
38
103
53
97
13
1
3
22
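The `_relative_position_bucket` docstring above describes how signed token distances are mapped to a fixed number of buckets: exact buckets for small distances, logarithmically spaced buckets up to `max_distance`. The sketch below reimplements that math as a free function with explicit arguments so bucket assignments can be inspected in isolation; it is a sketch, not the module itself.

```python
import math
import torch

def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map signed token distances (memory_position - query_position) to bucket ids."""
    relative_buckets = 0
    if bidirectional:
        # Encoder case: split buckets between negative and positive distances.
        num_buckets //= 2
        relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
        relative_position = torch.abs(relative_position)
    else:
        # Decoder case: only non-positive distances are valid.
        relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
    max_exact = num_buckets // 2  # first half of the buckets covers exact distances
    is_small = relative_position < max_exact
    # Remaining buckets grow logarithmically up to max_distance.
    log_ratio = torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact)
    relative_position_if_large = max_exact + (log_ratio * (num_buckets - max_exact)).to(torch.long)
    relative_position_if_large = torch.min(
        relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
    )
    return relative_buckets + torch.where(is_small, relative_position, relative_position_if_large)

distances = torch.tensor([-64, -3, 0, 3, 64])
print(relative_position_bucket(distances))  # small distances get exact buckets, large ones share log-spaced buckets
```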
5,724
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5Block
from torch import nn
import torch
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg

class UMT5Block(GradientCheckpointingLayer):

    def __init__(self, config, layer_idx: Optional[int]=None):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.layer = nn.ModuleList()
        self.layer.append(UMT5LayerSelfAttention(config, layer_idx=layer_idx))
        if self.is_decoder:
            self.layer.append(UMT5LayerCrossAttention(config, layer_idx=layer_idx))
        self.layer.append(UMT5LayerFF(config))

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None):
        hidden_states, self_attn_weights = self.layer[0](hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, past_key_values=past_key_values, cache_position=cache_position)
        if hidden_states.dtype == torch.float16:
            max_dtype = torch.finfo(hidden_states.dtype).max
            clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        cross_attn_weights = None
        do_cross_attention = self.is_decoder and encoder_hidden_states is not None
        if do_cross_attention:
            hidden_states, cross_attn_weights = self.layer[1](hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, cache_position=cache_position)
            if hidden_states.dtype == torch.float16:
                max_dtype = torch.finfo(hidden_states.dtype).max
                clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        hidden_states = self.layer[-1](hidden_states)
        if hidden_states.dtype == torch.float16:
            max_dtype = torch.finfo(hidden_states.dtype).max
            clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs
class UMT5Block(GradientCheckpointingLayer):

    def __init__(self, config, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None):
        pass
4
0
36
4
30
3
4
0.08
1
5
3
0
2
2
2
12
74
9
60
23
45
5
32
11
29
6
1
2
8
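`UMT5Block.forward` above repeats the same float16 overflow guard after each sub-layer. A minimal standalone version of that clamp-to-finite pattern:

```python
import torch

def clamp_fp16_overflow(hidden_states: torch.Tensor) -> torch.Tensor:
    """Clamp fp16 activations just below the dtype max whenever an inf is present."""
    if hidden_states.dtype == torch.float16:
        max_dtype = torch.finfo(hidden_states.dtype).max
        clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    return hidden_states

x = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
print(clamp_fp16_overflow(x))  # infs are clamped to roughly +/-(65504 - 1000)
```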
5,725
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5ClassificationHead
import torch
from torch import nn
from .configuration_umt5 import UMT5Config

class UMT5ClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config: UMT5Config):
        super().__init__()
        self.dense = nn.Linear(config.d_model, config.d_model)
        self.dropout = nn.Dropout(p=config.classifier_dropout)
        self.out_proj = nn.Linear(config.d_model, config.num_labels)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states
class UMT5ClassificationHead(nn.Module):
    '''Head for sentence-level classification tasks.'''

    def __init__(self, config: UMT5Config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
1
6
0
6
0
1
0.08
1
3
1
0
2
3
2
12
16
2
13
6
10
1
13
6
10
1
1
0
2
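A small usage sketch for the head above, using a `SimpleNamespace` as a stand-in config (the hyper-parameter values are arbitrary, not real UMT5 settings):

```python
from types import SimpleNamespace
import torch
from torch import nn

class ClassificationHead(nn.Module):
    """Same dropout -> dense -> tanh -> dropout -> out_proj order as UMT5ClassificationHead."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.d_model, config.d_model)
        self.dropout = nn.Dropout(p=config.classifier_dropout)
        self.out_proj = nn.Linear(config.d_model, config.num_labels)

    def forward(self, hidden_states):
        hidden_states = self.dropout(hidden_states)
        hidden_states = torch.tanh(self.dense(hidden_states))
        hidden_states = self.dropout(hidden_states)
        return self.out_proj(hidden_states)

config = SimpleNamespace(d_model=8, classifier_dropout=0.1, num_labels=3)
head = ClassificationHead(config)
logits = head(torch.randn(2, 8))  # (batch, d_model) -> (batch, num_labels)
print(logits.shape)               # torch.Size([2, 3])
```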
5,726
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5DenseActDense
from ...activations import ACT2FN
import torch
from torch import nn
from .configuration_umt5 import UMT5Config

class UMT5DenseActDense(nn.Module):

    def __init__(self, config: UMT5Config):
        super().__init__()
        self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_states = self.wi(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8):
            hidden_states = hidden_states.to(self.wo.weight.dtype)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class UMT5DenseActDense(nn.Module):

    def __init__(self, config: UMT5Config):
        pass

    def forward(self, hidden_states):
        pass
3
0
9
0
9
0
2
0
1
3
1
0
2
4
2
12
20
1
19
7
16
0
15
7
12
2
1
1
3
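A functional sketch of the projection order in `UMT5DenseActDense.forward` above, using `nn.GELU` directly in place of the `ACT2FN` lookup and omitting the dtype guard; the dimensions are arbitrary:

```python
import torch
from torch import nn

d_model, d_ff = 8, 32
wi = nn.Linear(d_model, d_ff, bias=False)
wo = nn.Linear(d_ff, d_model, bias=False)
act = nn.GELU()
dropout = nn.Dropout(0.1)

x = torch.randn(2, 5, d_model)   # (batch, seq, d_model)
h = dropout(act(wi(x)))          # expand to d_ff and apply the activation
out = wo(h)                      # project back to d_model
print(out.shape)                 # torch.Size([2, 5, 8])
```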
5,727
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5DenseGatedActDense
import torch
from torch import nn
from .configuration_umt5 import UMT5Config
from ...activations import ACT2FN

class UMT5DenseGatedActDense(nn.Module):

    def __init__(self, config: UMT5Config):
        super().__init__()
        self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8):
            hidden_states = hidden_states.to(self.wo.weight.dtype)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class UMT5DenseGatedActDense(nn.Module):

    def __init__(self, config: UMT5Config):
        pass

    def forward(self, hidden_states):
        pass
3
0
13
1
10
2
2
0.14
1
3
1
0
2
5
2
12
27
3
21
10
18
3
17
10
14
2
1
1
3
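For contrast with the previous sketch, the gated variant above multiplies an activated projection by a second linear projection before `wo`. A minimal version with arbitrary dimensions:

```python
import torch
from torch import nn

d_model, d_ff = 8, 32
wi_0 = nn.Linear(d_model, d_ff, bias=False)  # activated branch
wi_1 = nn.Linear(d_model, d_ff, bias=False)  # linear (gating) branch
wo = nn.Linear(d_ff, d_model, bias=False)
act = nn.GELU()

x = torch.randn(2, 5, d_model)
h = act(wi_0(x)) * wi_1(x)                   # element-wise gate, as in UMT5DenseGatedActDense
print(wo(h).shape)                           # torch.Size([2, 5, 8])
```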
5,728
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5EncoderModel
from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from typing import Optional, Union import copy @auto_docstring class UMT5EncoderModel(UMT5PreTrainedModel): """ Examples: ```python >>> from transformers import UMT5EncoderModel, AutoTokenizer >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = 'umt5' _tied_weights_keys = ['encoder.embed_tokens.weight'] def __init__(self, config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). Example: ```python >>> from transformers import AutoTokenizer, UMT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return encoder_outputs
@auto_docstring class UMT5EncoderModel(UMT5PreTrainedModel): ''' Examples: ```python >>> from transformers import UMT5EncoderModel, AutoTokenizer >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```''' def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). Example: ```python >>> from transformers import AutoTokenizer, UMT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
10
3
10
1
6
3
1
0.77
1
4
2
0
7
2
7
10
100
17
47
25
28
36
28
15
20
2
2
1
10
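A minimal, illustrative sketch (not part of the UMT5EncoderModel record above): the encoder-only model returns one hidden state per token, and a common follow-up step is to pool those states into a single sentence vector. Mean pooling over the attention mask is my own illustrative choice here, not something the record prescribes.

```python
# Mean-pool the encoder's last_hidden_state into one vector per input,
# masking out padding positions. Assumes the "google/umt5-small" checkpoint
# named in the record's own docstring example.
import torch
from transformers import AutoTokenizer, UMT5EncoderModel

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5EncoderModel.from_pretrained("google/umt5-small")

batch = tokenizer(
    ["UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    hidden = model(**batch).last_hidden_state           # (batch, seq_len, d_model)

mask = batch["attention_mask"].unsqueeze(-1).float()     # (batch, seq_len, 1)
sentence_embedding = (hidden * mask).sum(dim=1) / mask.sum(dim=1)
print(sentence_embedding.shape)                           # (batch, d_model)
```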
5,729
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5ForConditionalGeneration
from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import copy from ...generation import GenerationMixin @auto_docstring(custom_intro='\n UMT5 Model with a `language modeling` head on top.\n ') class UMT5ForConditionalGeneration(UMT5PreTrainedModel, GenerationMixin): """ Examples: ```python >>> from transformers import UMT5ForConditionalGeneration, AutoTokenizer >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = 'umt5' _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UMT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. 
UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, UMT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer("Studies have shown that <extra_id_0> good for you", return_tensors="pt").input_ids >>> outputs = model.generate(input_ids) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * self.model_dim ** (-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return (loss,) + output if loss is not None else output return Seq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels)
@auto_docstring(custom_intro='\n UMT5 Model with a `language modeling` head on top.\n ') class UMT5ForConditionalGeneration(UMT5PreTrainedModel, GenerationMixin): ''' Examples: ```python >>> from transformers import UMT5ForConditionalGeneration, AutoTokenizer >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```''' def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, UMT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer("Studies have shown that <extra_id_0> good for you", return_tensors="pt").input_ids >>> outputs = model.generate(input_ids) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) ```''' pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass
10
2
16
2
11
3
2
0.39
2
7
3
0
10
5
11
14
214
36
128
51
94
50
67
30
55
12
2
1
24
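A hedged sketch of the teacher-forcing step the UMT5ForConditionalGeneration forward above relies on: when `labels` are given, they are shifted one position to the right and prefixed with the decoder start token (the pad token for UMT5) before being fed to the decoder. The helper below is a simplified stand-in for the model's internal `_shift_right`, written only for illustration.

```python
# Simplified right-shift of label ids for teacher forcing; -100 (the ignored
# label value) must be replaced with a real token id before decoding.
import torch

def shift_right(labels: torch.Tensor, decoder_start_token_id: int, pad_token_id: int) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[259, 1461, 8, 1, -100]])
print(shift_right(labels, decoder_start_token_id=0, pad_token_id=0))
# tensor([[   0,  259, 1461,    8,    1]])
```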
5,730
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5ForQuestionAnswering
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import copy from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput @auto_docstring class UMT5ForQuestionAnswering(UMT5PreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UMT5Stack(decoder_config, self.shared) self.num_labels = config.num_labels self.qa_outputs = nn.Linear(config.d_model, config.num_labels) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if start_positions is not None and end_positions is not None: use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = self._shift_right(input_ids) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = decoder_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) ignored_index = start_logits.size(1) start_positions = 
start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs return (total_loss,) + output if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class UMT5ForQuestionAnswering(UMT5PreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. ''' pass
9
1
23
2
18
3
3
0.18
1
7
3
0
7
6
7
10
179
23
132
47
104
24
71
28
63
17
2
2
24
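An illustrative post-processing sketch for the question-answering head above, which splits its projection into `start_logits` and `end_logits`: a simple argmax decode of the predicted span. Real pipelines usually also enforce start <= end and restrict the span to the context portion of the input; that refinement is omitted here.

```python
# Turn start/end logits into a predicted (start, end) token span via argmax.
import torch

def decode_span(start_logits: torch.Tensor, end_logits: torch.Tensor) -> tuple[int, int]:
    start = int(torch.argmax(start_logits, dim=-1)[0])
    end = int(torch.argmax(end_logits, dim=-1)[0])
    return start, end

# Fake logits for a batch of one sequence of length 8, just to exercise the function.
start_logits = torch.randn(1, 8)
end_logits = torch.randn(1, 8)
print(decode_span(start_logits, end_logits))
```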
5,731
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5ForSequenceClassification
import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from .configuration_umt5 import UMT5Config from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring(custom_intro='\n UMT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ') class UMT5ForSequenceClassification(UMT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: UMT5Config): super().__init__(config) self.transformer = UMT5Model(config) self.classification_head = UMT5ClassificationHead(config) self.post_init() self.model_parallel = False @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = self._shift_right(input_ids) outputs = self.transformer(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of <eos> tokens.') batch_size, _, hidden_size = sequence_output.shape sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, 
encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n UMT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ') class UMT5ForSequenceClassification(UMT5PreTrainedModel): def __init__(self, config: UMT5Config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
59
5
49
5
9
0.1
1
10
4
0
2
3
2
5
125
12
103
35
81
10
48
17
45
17
2
3
18
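A standalone illustration of the pooling trick used in the sequence-classification forward above: the hidden state at the last `<eos>` position of each sequence is taken as the pooled representation that feeds the classification head. The tensors below are random placeholders chosen only to show the indexing.

```python
# Select the hidden state at the final <eos> position of each example.
import torch

batch_size, seq_len, hidden_size, eos_token_id = 2, 5, 4, 1
sequence_output = torch.randn(batch_size, seq_len, hidden_size)
input_ids = torch.tensor([[5, 6, 7, 1, 0],
                          [8, 9, 1, 0, 0]])  # 1 is <eos>, 0 is padding

eos_mask = input_ids.eq(eos_token_id)
# Keep only the <eos> positions, then take the last one per example.
sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
print(sentence_representation.shape)  # torch.Size([2, 4])
```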
5,732
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5ForTokenClassification
from torch import nn
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
from .configuration_umt5 import UMT5Config
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

@auto_docstring
class UMT5ForTokenClassification(UMT5PreTrainedModel):
    _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight']
    _tied_weights_keys = ['transformer.encoder.embed_tokens.weight']

    def __init__(self, config: UMT5Config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = UMT5EncoderModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so
            you should be able to pad the inputs on both the right and the left. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail.
            [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining
            take a look a [UMT5 Training](./umt5#training).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.transformer(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits, outputs[2:-1])
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring
class UMT5ForTokenClassification(UMT5PreTrainedModel):
    def __init__(self, config: UMT5Config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so
            you should be able to pad the inputs on both the right and the left. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail.
            [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining
            take a look a [UMT5 Training](./umt5#training).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        '''
        pass
5
1
29
4
22
3
3
0.16
1
6
3
0
2
4
2
5
66
9
49
26
34
8
24
15
21
5
2
1
6
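An illustrative post-processing sketch for the token-classification head above: per-token logits are reduced to label ids with an argmax, and padding positions are dropped using the attention mask. The label map below is hypothetical and exists only to make the example self-contained.

```python
# Map per-token logits to label strings, skipping padded positions.
import torch

id2label = {0: "O", 1: "B-ENT", 2: "I-ENT"}           # hypothetical label map
logits = torch.randn(1, 6, len(id2label))             # (batch, seq_len, num_labels)
attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0]])   # last two positions are padding

pred_ids = logits.argmax(dim=-1)
labels = [id2label[int(i)] for i in pred_ids[0][attention_mask[0].bool()]]
print(labels)  # e.g. ['O', 'B-ENT', 'I-ENT', 'O'] (random logits, so output varies)
```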
5,733
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5LayerCrossAttention
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
from torch import nn

class UMT5LayerCrossAttention(nn.Module):

    def __init__(self, config, layer_idx: Optional[int]=None):
        super().__init__()
        self.EncDecAttention = UMT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
        self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, layer_head_mask=None, past_key_values=None, cache_position=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(normed_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, past_key_values=past_key_values, cache_position=cache_position)
        layer_output = hidden_states + self.dropout(attention_output[0])
        outputs = (layer_output,) + attention_output[1:]
        return outputs
class UMT5LayerCrossAttention(nn.Module):
    def __init__(self, config, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, layer_head_mask=None, past_key_values=None, cache_position=None):
        pass
4
0
13
0
13
1
1
0.04
1
4
2
0
2
3
2
12
28
1
27
18
16
1
12
10
9
1
1
0
2
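A hedged sketch of the cross-attention wiring the layer above encapsulates: queries come from the decoder hidden states, keys and values from the encoder output, and a residual connection wraps the result. `torch.nn.MultiheadAttention` stands in for `UMT5Attention` here, so relative position biases and caching are deliberately out of scope.

```python
# Decoder-to-encoder cross-attention with a residual connection, using the
# stock PyTorch attention module as a stand-in for UMT5Attention.
import torch
from torch import nn

d_model, n_heads = 16, 4
cross_attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)

decoder_states = torch.randn(2, 3, d_model)   # (batch, tgt_len, d_model)
encoder_states = torch.randn(2, 9, d_model)   # (batch, src_len, d_model)

attn_out, _ = cross_attn(query=decoder_states, key=encoder_states, value=encoder_states)
layer_output = decoder_states + attn_out      # residual add, as in the layer above
print(layer_output.shape)                      # torch.Size([2, 3, 16])
```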
5,734
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5LayerFF
from .configuration_umt5 import UMT5Config
from torch import nn

class UMT5LayerFF(nn.Module):

    def __init__(self, config: UMT5Config):
        super().__init__()
        if config.is_gated_act:
            self.DenseReluDense = UMT5DenseGatedActDense(config)
        else:
            self.DenseReluDense = UMT5DenseActDense(config)
        self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        forwarded_states = self.layer_norm(hidden_states)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class UMT5LayerFF(nn.Module):
    def __init__(self, config: UMT5Config):
        pass

    def forward(self, hidden_states):
        pass
3
0
7
1
7
0
2
0
1
5
4
0
2
3
2
12
16
2
14
7
11
0
13
7
10
2
1
1
3
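A minimal sketch of the pre-norm residual pattern UMT5LayerFF follows: normalize, run the feed-forward block, apply dropout, then add the result back onto the untouched input. The two-linear ReLU MLP is a simplified stand-in for the gated `UMT5DenseGatedActDense` / `UMT5DenseActDense` blocks, and a stock LayerNorm stands in for `UMT5LayerNorm`.

```python
# Pre-norm residual feed-forward block, simplified for illustration.
import torch
from torch import nn

class PreNormFF(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout: float = 0.1):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.ff = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model))
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Residual: input plus the dropped-out feed-forward of the normalized input.
        return hidden_states + self.dropout(self.ff(self.norm(hidden_states)))

x = torch.randn(2, 7, 16)
print(PreNormFF(16, 64)(x).shape)  # torch.Size([2, 7, 16])
```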
5,735
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5LayerNorm
import torch
from torch import nn

class UMT5LayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        Construct a layernorm module in the UMT5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class UMT5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-06):
        '''
        Construct a layernorm module in the UMT5 style. No bias and no subtraction of mean.
        '''
        pass

    def forward(self, hidden_states):
        pass
3
1
11
2
5
4
2
0.73
1
1
0
0
2
2
2
12
23
4
11
6
8
8
11
6
8
2
1
1
3
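A hedged numerical check of what the layer norm above computes: an RMS-style normalization with no mean subtraction and no bias, i.e. `weight * x * rsqrt(mean(x^2) + eps)`. The snippet is written against plain torch so it runs without the transformers internals.

```python
# Verify that the UMT5-style layer norm divides each element by the root mean square.
import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * torch.rsqrt(variance + eps))

x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
weight = torch.ones(4)
out = rms_norm(x, weight)

rms = x.pow(2).mean(-1, keepdim=True).add(1e-6).sqrt()
print(torch.allclose(out, x / rms))  # True: each element is divided by the RMS of its row
```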
5,736
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5LayerSelfAttention
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
from torch import nn

class UMT5LayerSelfAttention(nn.Module):

    def __init__(self, config, layer_idx: Optional[int]=None):
        super().__init__()
        self.SelfAttention = UMT5Attention(config, has_relative_attention_bias=True, layer_idx=layer_idx)
        self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, past_key_values=None, cache_position=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(normed_hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, past_key_values=past_key_values, cache_position=cache_position)
        hidden_states = hidden_states + self.dropout(attention_output[0])
        outputs = (hidden_states,) + attention_output[1:]
        return outputs
class UMT5LayerSelfAttention(nn.Module):
    def __init__(self, config, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, past_key_values=None, cache_position=None):
        pass
4
0
12
0
12
1
1
0.04
1
4
2
0
2
3
2
12
26
1
25
16
15
1
12
9
9
1
1
0
2
5,737
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5Model
from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from .configuration_umt5 import UMT5Config from typing import Optional, Union import copy @auto_docstring class UMT5Model(UMT5PreTrainedModel): """ Examples: ```python >>> from transformers import UMT5Model, AutoTokenizer >>> model = UMT5Model.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> noisy_text = "UN Offizier sagt, dass weiter <extra_id_0> werden muss in Syrien." >>> label = "<extra_id_0> verhandelt" >>> inputs = tokenizer(inputs, return_tensors="pt") >>> labels = tokenizer(label=label, return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = 'umt5' config: UMT5Config _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UMT5Stack(decoder_config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, UMT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5Model.from_pretrained("google/umt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for UMT5Model. >>> # This is not needed for torch's UMT5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class UMT5Model(UMT5PreTrainedModel): ''' Examples: ```python >>> from transformers import UMT5Model, AutoTokenizer >>> model = UMT5Model.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> noisy_text = "UN Offizier sagt, dass weiter <extra_id_0> werden muss in Syrien." >>> label = "<extra_id_0> verhandelt" >>> inputs = tokenizer(inputs, return_tensors="pt") >>> labels = tokenizer(label=label, return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```''' def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, UMT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5Model.from_pretrained("google/umt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for UMT5Model. >>> # This is not needed for torch's UMT5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
10
3
17
2
12
3
2
0.43
1
6
3
0
8
3
8
11
172
26
102
39
73
44
46
20
37
8
2
1
17
5,738
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5PreTrainedModel
import torch from ...modeling_utils import PreTrainedModel from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from .configuration_umt5 import UMT5Config @auto_docstring class UMT5PreTrainedModel(PreTrainedModel): config: UMT5Config base_model_prefix = 'transformer' supports_gradient_checkpointing = True _can_compile_fullgraph = True _no_split_modules = ['UMT5Block'] _keep_in_fp32_modules = ['wo'] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = {'decoder_input_ids': input_ids, 'input_ids': input_ids, 'decoder_attention_mask': input_mask} return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, UMT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (UMT5Model, UMT5ForConditionalGeneration, UMT5EncoderModel, UMT5ForQuestionAnswering)): module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings): module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, 'qa_outputs'): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, UMT5ForTokenClassification): if hasattr(module, 'classifier'): module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) module.classifier.bias.data.zero_() elif isinstance(module, UMT5ClassificationHead): module.dense.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.dense, 'bias') and module.dense.bias is not None: module.dense.bias.data.zero_() module.out_proj.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.out_proj, 'bias') and module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, UMT5DenseActDense): module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi, 'bias') and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, UMT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_0, 'bias') and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_1, 'bias') and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, UMT5Attention): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, 
std=factor * d_model ** (-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError('self.model.config.decoder_start_token_id has to be defined. In UMT5 it is usually set to the pad_token_id. See UMT5 docs for more information.') if is_torch_fx_proxy(input_ids): shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
@auto_docstring class UMT5PreTrainedModel(PreTrainedModel): @property def dummy_inputs(self): pass def _init_weights(self, module): '''Initialize the weights''' pass def _shift_right(self, input_ids): pass
6
1
33
1
28
4
8
0.17
1
11
10
7
3
0
3
3
117
8
94
22
89
16
71
21
67
19
1
2
24
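As a companion to the `UMT5PreTrainedModel._shift_right` code in the record above, here is a minimal stand-alone sketch of the same right-shift on a label tensor. The token ids, and the choice of `0` for both the decoder start and pad ids, are illustrative assumptions rather than values read from a real checkpoint.

```python
import torch

# Illustrative ids only; UMT5 conventionally uses pad_token_id as decoder_start_token_id.
decoder_start_token_id = 0
pad_token_id = 0

labels = torch.tensor([[213, 67, 589, -100, -100]])  # -100 marks ignored label positions

shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()           # move every token one step to the right
shifted[..., 0] = decoder_start_token_id              # prepend the decoder start token
shifted.masked_fill_(shifted == -100, pad_token_id)   # ignored positions become padding

print(shifted)  # tensor([[  0, 213,  67, 589,   0]])
```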
5,739
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/umt5/modeling_umt5.py
transformers.models.umt5.modeling_umt5.UMT5Stack
from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from typing import Optional, Union from ...modeling_attn_mask_utils import AttentionMaskConverter class UMT5Stack(UMT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList([UMT5Block(config, layer_idx=i) for i in range(config.num_layers)]) self.final_layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.gradient_checkpointing = False self.post_init() def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError('You have to initialize the model with valid token embeddings') inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f'`use_cache` can only be set to `True` if {self} is used as a decoder') if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions) elif attention_mask is not None: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min else: causal_mask = None if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if self.is_decoder: all_cross_attentions += (layer_outputs[2],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return 
BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
""" if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
class UMT5Stack(UMT5PreTrainedModel): def __init__(self, config, embed_tokens=None): pass def set_input_embeddings(self, new_embeddings): pass def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
7
1
56
5
44
7
10
0.16
1
16
8
0
5
6
6
9
342
36
265
77
226
43
138
45
131
43
2
3
59
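The `_prepare_4d_causal_attention_mask_with_cache_position` docstring in the UMT5Stack record above describes turning a 2D padding mask into a 4D additive mask. The sketch below reproduces the core of that construction for the simple no-cache case; all sizes and the padding pattern are made up, and the real method additionally handles cache offsets and already-4D inputs.

```python
import torch

batch_size, sequence_length, target_length = 2, 4, 4
dtype = torch.float32
min_dtype = torch.finfo(dtype).min
cache_position = torch.arange(sequence_length)  # no cache, so query positions start at 0

# Start fully masked, then unmask the causal (lower-triangular) part.
causal = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal = torch.triu(causal, diagonal=1)
causal *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal = causal[None, None, :, :].expand(batch_size, 1, -1, -1)  # (batch, 1, query, key)

# Fold in a 2D padding mask: positions that are causally visible but padded get the min value.
attention_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]])
padding = (causal + attention_mask[:, None, None, :]) == 0
causal = causal.clone().masked_fill(padding, min_dtype)

print(causal[1, 0])  # second sample: padded key positions now carry min_dtype even below the diagonal
```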
5,740
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/configuration_unispeech.py
transformers.models.unispeech.configuration_unispeech.UniSpeechConfig
import operator import functools from ...configuration_utils import PretrainedConfig class UniSpeechConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`UniSpeechModel`]. It is used to instantiate an UniSpeech model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeech [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the UniSpeech model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UniSpeechModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`UniSpeechModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_quantizer_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder that's used by the quantizer. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`UniSpeechForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_extract_activation (`str, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. 
If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. do_stable_layer_norm (`bool`, *optional*, defaults to `False`): Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False` corresponds to applying layer norm after the attention layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' num_codevectors_per_group (`int`, *optional*, defaults to 320): Number of entries in each quantization codebook (group). num_codevector_groups (`int`, *optional*, defaults to 2): Number of codevector groups for product codevector quantization. contrastive_logits_temperature (`float`, *optional*, defaults to 0.1): The temperature *kappa* in the contrastive loss. num_negatives (`int`, *optional*, defaults to 100): Number of negative samples for the contrastive loss. codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the quantized feature vectors. proj_codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the final projection of both the quantized and the transformer features. diversity_loss_weight (`int`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`UniSpeechForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`UniSpeechForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`UniSpeechForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. num_ctc_classes (`int`, *optional*, defaults to 80): Specifies the number of classes (phoneme tokens and blank token) for phoneme-level CTC loss. Only relevant when using an instance of [`UniSpeechForPreTraining`]. pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. replace_prob (`float`, *optional*, defaults to 0.5): Probability that transformer feature is replaced by quantized feature for pretraining. 
Example: ```python >>> from transformers import UniSpeechConfig, UniSpeechModel >>> # Initializing a UniSpeech facebook/unispeech-base-960h style configuration >>> configuration = UniSpeechConfig() >>> # Initializing a model (with random weights) from the facebook/unispeech-base-960h style configuration >>> model = UniSpeechModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'unispeech' def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.num_ctc_classes = num_ctc_classes self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers: raise ValueError(f'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.') self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity self.replace_prob = replace_prob @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
null
4
1
60
3
55
2
2
1.34
1
3
0
0
2
45
2
2
280
16
113
100
59
151
53
49
50
2
1
1
3
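Two details from the UniSpeechConfig record above are easy to verify by hand: the three `conv_*` tuples must have the same length, and `inputs_to_logits_ratio` is simply the product of the strides, i.e. the overall downsampling factor of the feature encoder. A short check using the documented defaults:

```python
import functools
import operator

conv_dim = (512, 512, 512, 512, 512, 512, 512)
conv_stride = (5, 2, 2, 2, 2, 2, 2)
conv_kernel = (10, 3, 3, 3, 3, 2, 2)

# The config raises a ValueError if these lengths disagree.
assert len(conv_dim) == len(conv_stride) == len(conv_kernel) == 7

inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
print(inputs_to_logits_ratio)  # 320 -> one output frame per 320 input samples (20 ms at 16 kHz)
```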
5,741
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechAttention
from .configuration_unispeech import UniSpeechConfig import torch.nn as nn from typing import Callable, Optional, Union import torch from ...processing_utils import Unpack from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...modeling_flash_attention_utils import FlashAttentionKwargs class UniSpeechAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[UniSpeechConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) current_states = key_value_states if is_cross_attention else hidden_states key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights, None)
class UniSpeechAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[UniSpeechConfig]=None): pass def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
3
2
50
7
35
8
5
0.24
1
7
1
2
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
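The `UniSpeechAttention` record above notes the `Batch x Time x Channel` input convention; the snippet below only walks through the head split and merge reshapes it performs, with made-up sizes and random data (no projection weights involved):

```python
import torch

bsz, tgt_len, embed_dim, num_heads = 2, 50, 768, 12
head_dim = embed_dim // num_heads
scaling = head_dim ** -0.5  # applied to the query/key dot products

hidden_states = torch.randn(bsz, tgt_len, embed_dim)                    # Batch x Time x Channel
heads_first = hidden_states.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
print(heads_first.shape)  # torch.Size([2, 12, 50, 64]) -> (batch, heads, time, head_dim)

merged = heads_first.transpose(1, 2).reshape(bsz, tgt_len, -1)          # back to Batch x Time x Channel
print(merged.shape)       # torch.Size([2, 50, 768])
```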
5,742
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechAttnAdapterLayer
import torch import torch.nn as nn class UniSpeechAttnAdapterLayer(nn.Module): def __init__(self, config): """ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. """ super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states
class UniSpeechAttnAdapterLayer(nn.Module): def __init__(self, config): ''' Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. ''' pass def forward(self, hidden_states: torch.FloatTensor): pass
3
1
11
2
7
2
1
0.27
1
1
0
0
2
6
2
12
23
4
15
9
12
4
15
9
12
1
1
0
2
5,743
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechEncoder
from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput from ...integrations.deepspeed import is_deepspeed_zero3_enabled from typing import Callable, Optional, Union import torch.nn as nn from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa import torch class UniSpeechEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = self._update_full_mask(attention_mask, hidden_states) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask
class UniSpeechEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass
4
0
41
5
33
3
8
0.07
1
8
3
0
2
7
2
12
83
11
67
26
57
5
45
19
42
15
1
3
16
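The encoder loop in the UniSpeechEncoder record above implements LayerDrop: during training, each layer is skipped with probability `config.layerdrop`. A self-contained sketch of that rule, where toy `nn.Linear` layers stand in for real encoder layers:

```python
import torch
import torch.nn as nn

layerdrop = 0.1
training = True

def run_with_layerdrop(hidden_states, layers):
    for layer in layers:
        skip_the_layer = training and torch.rand([]) < layerdrop
        if not skip_the_layer:
            hidden_states = layer(hidden_states)
        # when skipped, hidden_states passes through unchanged
    return hidden_states

layers = [nn.Linear(8, 8) for _ in range(4)]
out = run_with_layerdrop(torch.randn(2, 3, 8), layers)
print(out.shape)  # torch.Size([2, 3, 8])
```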
5,744
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechEncoderLayer
from ...modeling_layers import GradientCheckpointingLayer import torch.nn as nn class UniSpeechEncoderLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class UniSpeechEncoderLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False): pass
3
0
16
3
13
0
2
0
1
2
1
0
2
5
2
12
33
6
27
11
24
0
20
11
17
2
1
1
3
5,745
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechEncoderLayerStableLayerNorm
from ...modeling_layers import GradientCheckpointingLayer import torch.nn as nn import torch from typing import Callable, Optional, Union class UniSpeechEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, 'adapter_attn_dim', None) is not None: self.adapter_layer = UniSpeechAttnAdapterLayer(config) else: self.adapter_layer = None def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class UniSpeechEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False): pass
3
0
21
3
18
0
3
0
1
5
2
0
2
6
2
12
43
6
37
17
29
0
24
12
21
3
1
1
5
5,746
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechEncoderStableLayerNorm
from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput from ...integrations.deepspeed import is_deepspeed_zero3_enabled from typing import Callable, Optional, Union import torch.nn as nn from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa import torch class UniSpeechEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = self._update_full_mask(attention_mask, hidden_states) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask
class UniSpeechEncoderStableLayerNorm(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass
4
0
43
6
34
3
8
0.09
1
6
3
0
2
7
2
12
87
12
69
27
59
6
45
19
42
15
1
3
16
5,747
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechFeatureEncoder
import torch.nn as nn class UniSpeechFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == 'group': conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)] elif config.feat_extract_norm == 'layer': conv_layers = [UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError(f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']") self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states
class UniSpeechFeatureEncoder(nn.Module): '''Construct the features from raw audio waveform''' def __init__(self, config): pass def _freeze_parameters(self): pass def forward(self, input_values): pass
4
1
13
1
12
0
3
0.06
1
6
3
1
3
3
3
13
45
7
36
11
32
2
23
11
19
4
1
2
9
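A useful back-of-the-envelope check for the convolution stack in the UniSpeechFeatureEncoder record above is how many frames it produces per second of audio. With the default kernel/stride tuples and no padding, the standard Conv1d length formula gives 49 frames for one second of 16 kHz input:

```python
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

def feat_extract_output_length(input_length: int) -> int:
    for kernel, stride in zip(conv_kernel, conv_stride):
        # Conv1d with no padding, dilation 1
        input_length = (input_length - kernel) // stride + 1
    return input_length

print(feat_extract_output_length(16000))  # 49 frames for one second of 16 kHz audio
```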
5,748
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechFeatureProjection
import torch.nn as nn class UniSpeechFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return (hidden_states, norm_hidden_states)
class UniSpeechFeatureProjection(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
3
0
6
0
5
1
1
0.09
1
1
0
0
2
3
2
12
13
1
11
7
8
1
11
7
8
1
1
0
2
5,749
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechFeedForward
import torch.nn as nn from ...activations import ACT2FN class UniSpeechFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states
class UniSpeechFeedForward(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
3
0
10
2
9
0
2
0
1
2
0
0
2
5
2
12
22
4
18
8
15
0
17
8
14
2
1
1
3
5,750
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechForCTC
from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput from typing import Callable, Optional, Union import torch.nn as nn import warnings import torch @auto_docstring(custom_intro='\n UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ') class UniSpeechForCTC(UniSpeechPreTrainedModel): def __init__(self, config, target_lang: Optional[str]=None): """ target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechForCTC`] with adapters. Uses 'eng' by default. """ super().__init__(config) self.unispeech = UniSpeechModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.") output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ target_lang = self.target_lang if target_lang is not None and getattr(self.config, 'adapter_attn_dim', None) is None: raise ValueError(f'Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.') elif target_lang is None and getattr(self.config, 'adapter_attn_dim', None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.unispeech.parameters(): param.requires_grad = False @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}') outputs = self.unispeech(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ') class UniSpeechForCTC(UniSpeechPreTrainedModel): def __init__(self, config, target_lang: Optional[str]=None): ''' target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechForCTC`] with adapters. Uses 'eng' by default. ''' pass def tie_weights(self): ''' This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. ''' pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_base_model(self): ''' Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. ''' pass
9
6
23
3
14
6
3
0.35
1
8
2
0
6
4
6
9
149
22
94
33
71
33
47
24
40
7
2
2
18
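The `UniSpeechForCTC` record above computes its training loss with `nn.functional.ctc_loss` over log-softmaxed logits transposed to time-major layout, masking label padding with -100. Below is a minimal standalone sketch of that computation; the batch/sequence sizes, the blank index and the dummy labels are illustrative assumptions, not values taken from a real checkpoint.

```python
# Sketch of the CTC loss step used by a CTC head like the record above.
import torch
import torch.nn as nn

batch_size, time_steps, vocab_size = 2, 50, 32
pad_token_id = 0  # assumed blank index, mirroring config.pad_token_id

logits = torch.randn(batch_size, time_steps, vocab_size)   # (B, T, V) from the lm_head
labels = torch.randint(1, vocab_size, (batch_size, 12))    # target token ids, no blanks
labels[1, 8:] = -100                                       # pretend the second sample is shorter

input_lengths = torch.full((batch_size,), time_steps, dtype=torch.long)
labels_mask = labels >= 0                                  # -100 marks label padding
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)

# ctc_loss expects (T, B, V) log-probabilities
log_probs = nn.functional.log_softmax(logits, dim=-1).transpose(0, 1)
loss = nn.functional.ctc_loss(
    log_probs, flattened_targets, input_lengths, target_lengths,
    blank=pad_token_id, reduction="mean", zero_infinity=True,
)
print(loss.item())
```

In the record itself, `input_lengths` is not the raw waveform length but the number of encoder frames obtained from `_get_feat_extract_output_lengths`.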
5,751
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTraining
from .configuration_unispeech import UniSpeechConfig import torch.nn as nn import warnings import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from typing import Callable, Optional, Union @auto_docstring(custom_intro='\n UniSpeech Model with a vector-quantization module and ctc loss for pre-training.\n ') class UniSpeechForPreTraining(UniSpeechPreTrainedModel): def __init__(self, config: UniSpeechConfig): super().__init__(config) self.unispeech = UniSpeechModel(config) self.dropout_features = nn.Dropout(config.feat_quantizer_dropout) self.quantizer = UniSpeechGumbelVectorQuantizer(config) self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim) self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size) self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes) self.dropout = nn.Dropout(config.final_dropout) self.post_init() def set_gumbel_temperature(self, temperature: int): """ Set the Gumbel softmax temperature to a given value. Only necessary for training """ self.quantizer.temperature = temperature def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech.feature_extractor._freeze_parameters() @staticmethod def compute_contrastive_logits(target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int=1): """ Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. 
""" target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1) logits = logits.type_as(target_features) logits = logits / temperature return logits @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechForPreTrainingOutput]: """ Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv") >>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv") >>> # TODO: Add full pretraining example ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) transformer_features = outputs[0] extract_features = self.dropout_features(outputs[1]) quantized_features, codevector_perplexity = self.quantizer(extract_features) quantized_features = self.project_q(quantized_features.to(self.project_q.weight.dtype)) quantized_features = self.project_hid(quantized_features) prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(self.config.replace_prob) prob_replace_matrix = prob_replace_matrix.transpose(0, 1) sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device) sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1) sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1) logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + quantized_features.masked_fill(~sampled_replace_matrix, 0.0) logits = self.dropout(logits) logits = self.ctc_proj(logits) loss = None if not return_dict: if loss is not None: return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return UniSpeechForPreTrainingOutput(loss=loss, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeech Model with a vector-quantization module and ctc loss for pre-training.\n ') class UniSpeechForPreTraining(UniSpeechPreTrainedModel): def __init__(self, config: UniSpeechConfig): pass def set_gumbel_temperature(self, temperature: int): ''' Set the Gumbel softmax temperature to a given value. Only necessary for training ''' pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass @staticmethod def compute_contrastive_logits(target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int=1): ''' Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechForPreTrainingOutput]: ''' Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv") >>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv") >>> # TODO: Add full pretraining example ```''' pass
10
5
22
3
13
6
2
0.45
1
9
4
0
5
7
6
9
141
22
82
37
60
37
46
23
39
4
2
2
9
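The `UniSpeechForPreTraining` record above includes a `compute_contrastive_logits` static method that scores predicted features against one positive and several negative codevectors with cosine similarity, scaled by a temperature. A small sketch of that scoring follows; the tensor shapes and the temperature value are illustrative assumptions.

```python
# Sketch of cosine-similarity contrastive logits as in compute_contrastive_logits above.
import torch

num_negatives, batch, seq, dim = 4, 2, 10, 16
temperature = 0.1

target_features = torch.randn(1, batch, seq, dim)                # positive codevectors
negative_features = torch.randn(num_negatives, batch, seq, dim)  # sampled distractors
predicted_features = torch.randn(batch, seq, dim)                # transformer outputs

# stack positives and negatives along a leading "candidates" axis
candidates = torch.cat([target_features, negative_features], dim=0)

# cosine similarity broadcasts the predictions against every candidate
logits = torch.cosine_similarity(predicted_features.float(), candidates.float(), dim=-1)
logits = logits / temperature
print(logits.shape)  # (1 + num_negatives, batch, seq)
```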
5,752
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput
from dataclasses import dataclass import torch.nn as nn import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput from typing import Callable, Optional, Union @dataclass @auto_docstring(custom_intro='\n Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.\n ') class UniSpeechForPreTrainingOutput(ModelOutput): """ loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477). projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. """ loss: Optional[torch.FloatTensor] = None projected_states: Optional[torch.FloatTensor] = None projected_quantized_states: Optional[torch.FloatTensor] = None codevector_perplexity: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.\n ') class UniSpeechForPreTrainingOutput(ModelOutput): ''' loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477). projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. ''' pass
3
1
0
0
0
0
0
3.14
1
0
0
0
0
0
0
0
33
4
7
7
6
22
7
7
6
0
1
0
0
5,753
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechForSequenceClassification
from torch.nn import CrossEntropyLoss import torch.nn as nn import warnings import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput from typing import Callable, Optional, Union @auto_docstring(custom_intro='\n UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ') class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, 'add_adapter') and config.add_adapter: raise ValueError('Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)') self.unispeech = UniSpeechModel(config) num_layers = config.num_hidden_layers + 1 if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech.parameters(): param.requires_grad = False @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, SequenceClassifierOutput]: """ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ') class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel): def __init__(self, config): pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_base_model(self): ''' Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, SequenceClassifierOutput]: ''' input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
8
4
20
2
14
4
3
0.3
1
7
2
0
5
4
5
8
117
14
80
31
59
24
46
22
40
8
2
1
15
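The sequence-classification record above pools frame-level features in two steps: an optional learned weighted sum over encoder layers, then a length-aware mean pool that ignores padded frames. A minimal sketch of both steps is below; the layer count, hidden size and padding pattern are illustrative assumptions.

```python
# Sketch of weighted layer sum + masked mean pooling as in the record above.
import torch
import torch.nn as nn

batch, seq, hidden, num_layers = 2, 8, 16, 4
hidden_states = torch.randn(batch, num_layers, seq, hidden)       # stacked encoder layers
layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)

# learned softmax weights over layers (use_weighted_layer_sum=True path)
norm_weights = nn.functional.softmax(layer_weights, dim=-1)
summed = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)  # (batch, seq, hidden)

# mean pooling that ignores padded frames
padding_mask = torch.tensor([[1] * 8, [1] * 5 + [0] * 3], dtype=torch.bool)
expand_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden)
summed = summed.masked_fill(~expand_mask, 0.0)
pooled = summed.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
print(pooled.shape)  # (batch, hidden)
```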
5,754
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechGroupNormConvLayer
from ...modeling_layers import GradientCheckpointingLayer from ...activations import ACT2FN import torch.nn as nn class UniSpeechGroupNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias) self.activation = ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states
class UniSpeechGroupNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): pass def forward(self, hidden_states): pass
3
0
10
1
9
0
2
0
1
1
0
0
2
5
2
12
22
3
19
8
16
0
13
8
10
2
1
0
3
5,755
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechGumbelVectorQuantizer
import torch.nn as nn import torch class UniSpeechGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError(f'`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups` {self.num_groups} for concatenation') self.codevectors = nn.Parameter(torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) self.temperature = 2 @staticmethod def _compute_perplexity(probs): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-07), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True).type_as(hidden_states) codevector_soft_dist = torch.softmax(hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1) perplexity = self._compute_perplexity(codevector_soft_dist) else: codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(-1, codevector_idx.view(-1, 1), 1.0) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return (codevectors, perplexity)
class UniSpeechGumbelVectorQuantizer(nn.Module): ''' Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information. ''' def __init__(self, config): pass @staticmethod def _compute_perplexity(probs): pass def forward(self, hidden_states): pass
5
1
20
3
14
3
2
0.27
1
2
0
0
2
5
3
13
68
12
44
19
39
12
31
18
27
2
1
1
5
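The Gumbel vector quantizer record above projects features to per-group logits, draws a hard one-hot code per group with the straight-through Gumbel-softmax, and assembles the quantized vector from the selected codevectors. A compact sketch of that training-mode path follows; the group, codebook and dimension sizes are illustrative assumptions.

```python
# Sketch of Gumbel-softmax vector quantization as in the quantizer record above.
import torch
import torch.nn as nn

batch, seq, feat_dim = 2, 6, 32
num_groups, num_vars, codevector_dim = 2, 8, 16
temperature = 2.0

weight_proj = nn.Linear(feat_dim, num_groups * num_vars)
codevectors = nn.Parameter(torch.randn(1, num_groups * num_vars, codevector_dim // num_groups))

features = torch.randn(batch, seq, feat_dim)
logits = weight_proj(features).view(batch * seq * num_groups, -1)

# straight-through Gumbel-softmax: hard one-hot forward, soft gradients backward
probs = nn.functional.gumbel_softmax(logits, tau=temperature, hard=True)
probs = probs.view(batch * seq, -1)

# select one codevector per group, then concatenate the groups
per_group = probs.unsqueeze(-1) * codevectors
quantized = per_group.view(batch * seq, num_groups, num_vars, -1).sum(-2)
quantized = quantized.view(batch, seq, -1)
print(quantized.shape)  # (batch, seq, codevector_dim)
```

At inference time the record replaces the Gumbel sample with a plain argmax over the same logits.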
5,756
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechLayerNormConvLayer
from ...modeling_layers import GradientCheckpointingLayer from ...activations import ACT2FN import torch.nn as nn class UniSpeechLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states
class UniSpeechLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): pass def forward(self, hidden_states): pass
3
0
12
2
10
0
2
0
1
1
0
0
2
5
2
12
25
4
21
8
18
0
15
8
12
2
1
0
3
5,757
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechModel
from .configuration_unispeech import UniSpeechConfig import torch.nn as nn import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from typing import Callable, Optional, Union @auto_docstring class UniSpeechModel(UniSpeechPreTrainedModel): def __init__(self, config: UniSpeechConfig): super().__init__(config) self.config = config self.feature_extractor = UniSpeechFeatureEncoder(config) self.feature_projection = UniSpeechFeatureProjection(config) if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = UniSpeechEncoderStableLayerNorm(config) else: self.encoder = UniSpeechEncoder(config) self.post_init() def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). """ if not getattr(self.config, 'apply_spec_augment', True): return hidden_states batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechBaseModelOutput]: """ mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask) encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return UniSpeechBaseModelOutput(last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
@auto_docstring class UniSpeechModel(UniSpeechPreTrainedModel): def __init__(self, config: UniSpeechConfig): pass def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None): ''' Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechBaseModelOutput]: ''' mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. ''' pass
6
2
36
5
27
3
5
0.12
1
9
6
0
3
5
3
6
119
17
91
28
66
11
42
14
38
6
2
1
14
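The `UniSpeechModel` record above applies SpecAugment-style masking in `_mask_hidden_states`: masked time steps are overwritten with a learned `masked_spec_embed` vector. The sketch below illustrates the idea with a simplified random span sampler standing in for the library's `_compute_mask_indices` helper (an assumption, not that helper's exact algorithm); sizes are illustrative.

```python
# Simplified SpecAugment-style time masking, in the spirit of _mask_hidden_states above.
import torch

batch, seq, hidden = 2, 20, 8
mask_length, num_spans = 3, 2

hidden_states = torch.randn(batch, seq, hidden)
masked_spec_embed = torch.randn(hidden)            # learned mask embedding

mask = torch.zeros(batch, seq, dtype=torch.bool)
for b in range(batch):
    starts = torch.randint(0, seq - mask_length, (num_spans,))
    for s in starts.tolist():
        mask[b, s : s + mask_length] = True        # mask a contiguous span of frames

# replace every masked frame with the learned embedding
hidden_states[mask] = masked_spec_embed.to(hidden_states.dtype)
print(mask.sum().item(), "frames masked")
```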
5,758
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechNoLayerNormConvLayer
import torch.nn as nn from ...modeling_layers import GradientCheckpointingLayer from ...activations import ACT2FN class UniSpeechNoLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states
class UniSpeechNoLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): pass def forward(self, hidden_states): pass
3
0
9
1
8
0
2
0
1
1
0
0
2
4
2
12
19
2
17
7
14
0
11
7
8
2
1
0
3
5,759
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechPositionalConvEmbedding
from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled import torch.nn as nn class UniSpeechPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, 'weight_norm'): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name='weight', dim=2) if hasattr(self.conv, 'parametrizations'): weight_g = self.conv.parametrizations.weight.original0 weight_v = self.conv.parametrizations.weight.original1 else: weight_g = self.conv.weight_g weight_v = self.conv.weight_v deepspeed.zero.register_external_parameter(self, weight_v) deepspeed.zero.register_external_parameter(self, weight_g) else: self.conv = weight_norm(self.conv, name='weight', dim=2) self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states
class UniSpeechPositionalConvEmbedding(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
3
0
21
3
18
0
3
0
1
2
1
0
2
3
2
12
43
7
36
10
32
0
28
10
24
4
1
2
5
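The positional convolution record above builds relative positional embeddings from a weight-normalized grouped `Conv1d`, trimming one frame when the kernel size is even so the output length matches the input. A small sketch follows; the hidden size, kernel size and group count are illustrative assumptions.

```python
# Sketch of the weight-normalized grouped conv positional embedding, as in the record above.
import torch
import torch.nn as nn

hidden_size, num_pos_emb, groups = 16, 8, 4
conv = nn.Conv1d(hidden_size, hidden_size, kernel_size=num_pos_emb,
                 padding=num_pos_emb // 2, groups=groups)

# prefer the parametrization API when available, as the record above does
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
    weight_norm = nn.utils.parametrizations.weight_norm
conv = weight_norm(conv, name="weight", dim=2)

x = torch.randn(2, 10, hidden_size).transpose(1, 2)   # (batch, hidden, time)
pos = conv(x)
if num_pos_emb % 2 == 0:                              # even kernel produces one extra frame
    pos = pos[:, :, :-1]
pos = nn.functional.gelu(pos).transpose(1, 2)         # back to (batch, time, hidden)
print(pos.shape)                                       # same time length as the input
```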
5,760
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel
from .configuration_unispeech import UniSpeechConfig import torch.nn as nn import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging import math from typing import Callable, Optional, Union from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel @auto_docstring class UniSpeechPreTrainedModel(PreTrainedModel): config: UniSpeechConfig base_model_prefix = 'unispeech' main_input_name = 'input_values' supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, UniSpeechGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, UniSpeechPositionalConvEmbedding): nn.init.normal_(module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels))) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, UniSpeechFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): return torch.div(input_length - kernel_size, stride, rounding_mode='floor') + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device) attention_mask[torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask
@auto_docstring class UniSpeechPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): ''' Computes the output length of the convolutional layers ''' pass def _conv_out_length(input_length, kernel_size, stride): pass def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): pass
6
2
16
2
12
3
3
0.27
1
5
3
4
3
0
3
3
75
10
51
16
46
14
40
16
35
9
1
2
13
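The `UniSpeechPreTrainedModel` record above converts raw-audio lengths to encoder-frame lengths by folding the conv formula over every feature-extractor layer. The sketch below reproduces that calculation using the default kernel/stride values quoted in the `UniSpeechSatConfig` docstring later in this dump; treating them as the active configuration is an assumption.

```python
# Sketch of _get_feat_extract_output_lengths: fold the 1D conv length formula over all layers.
import torch

conv_kernel = [10, 3, 3, 3, 3, 2, 2]   # defaults from the config docstring
conv_stride = [5, 2, 2, 2, 2, 2, 2]

def feat_extract_output_lengths(input_lengths: torch.Tensor) -> torch.Tensor:
    # 1D conv output length (no padding): floor((L - kernel) / stride) + 1
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        input_lengths = torch.div(input_lengths - kernel_size, stride, rounding_mode="floor") + 1
    return input_lengths

raw_lengths = torch.tensor([16000, 8000])          # one second / half a second at 16 kHz
print(feat_extract_output_lengths(raw_lengths))    # number of encoder frames per sample
```

These frame counts are what `_get_feature_vector_attention_mask` uses to build the downsampled attention mask.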
5,761
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech/modeling_unispeech.py
transformers.models.unispeech.modeling_unispeech.UniSpeechSamePadLayer
import torch.nn as nn class UniSpeechSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, :-self.num_pad_remove] return hidden_states
class UniSpeechSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): pass def forward(self, hidden_states): pass
3
0
4
0
4
0
2
0
1
1
0
0
2
1
2
12
9
1
8
4
5
0
8
4
5
2
1
1
4
5,762
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py
transformers.models.unispeech_sat.configuration_unispeech_sat.UniSpeechSatConfig
import functools from ...configuration_utils import PretrainedConfig import operator class UniSpeechSatConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`UniSpeechSatModel`]. It is used to instantiate an UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeechSat [microsoft/unispeech-sat-base-100h-libri-ft](https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the UniSpeechSat model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UniSpeechSatModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`UniSpeechSatModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_quantizer_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder that's used by the quantizer. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`UniSpeechSatForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_extract_activation (`str, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. 
If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. do_stable_layer_norm (`bool`, *optional*, defaults to `False`): Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False` corresponds to applying layer norm after the attention layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' num_codevectors_per_group (`int`, *optional*, defaults to 320): Number of entries in each quantization codebook (group). num_codevector_groups (`int`, *optional*, defaults to 2): Number of codevector groups for product codevector quantization. contrastive_logits_temperature (`float`, *optional*, defaults to 0.1): The temperature *kappa* in the contrastive loss. num_negatives (`int`, *optional*, defaults to 100): Number of negative samples for the contrastive loss. codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the quantized feature vectors. proj_codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the final projection of both the quantized and the transformer features. diversity_loss_weight (`int`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`UniSpeechSatForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`UniSpeechSatForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`UniSpeechSatForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. num_clusters (`int`, *optional*, defaults to 504): Number of clusters for weak labeling. Only relevant when using an instance of [`UniSpeechSatForPreTraining`]. 
Example: ```python >>> from transformers import UniSpeechSatModel, UniSpeechSatConfig >>> # Initializing a UniSpeechSat microsoft/unispeech-sat-base-100h-libri-ft style configuration >>> configuration = UniSpeechSatConfig() >>> # Initializing a model from the microsoft/unispeech-sat-base-100h-libri-ft style configuration >>> model = UniSpeechSatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'unispeech-sat' def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.num_clusters = num_clusters self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers: raise ValueError(f'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.') self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity self.classifier_proj_size = classifier_proj_size self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
null
4
1
64
3
58
3
2
1.36
1
3
0
0
2
48
2
2
298
17
119
106
62
162
56
52
53
2
1
1
3
5,763
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.AMSoftmaxLoss
import torch import torch.nn as nn class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super().__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss
class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): pass def forward(self, hidden_states, labels): pass
3
0
10
1
9
0
1
0
1
1
0
0
2
5
2
12
21
3
18
14
15
0
18
14
15
1
1
0
2
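The `AMSoftmaxLoss` record above implements the additive-margin softmax: normalize embeddings and class weights, subtract the margin from the true-class cosine only, scale, then apply cross-entropy. A standalone usage-style sketch of those steps follows; the embedding size, class count and batch are illustrative assumptions.

```python
# Sketch of additive-margin softmax scoring, mirroring the record above.
import torch
import torch.nn as nn

input_dim, num_labels, scale, margin = 16, 4, 30.0, 0.4
weight = nn.Parameter(torch.randn(input_dim, num_labels))
ce = nn.CrossEntropyLoss()

embeddings = torch.randn(8, input_dim)                  # x-vector style utterance embeddings
labels = torch.randint(0, num_labels, (8,))

# cosine similarity between L2-normalized embeddings and class weights
cos_theta = torch.mm(nn.functional.normalize(embeddings, dim=1),
                     nn.functional.normalize(weight, dim=0))
psi = cos_theta - margin                                # margin applied only to the true class
onehot = nn.functional.one_hot(labels, num_labels).bool()
logits = scale * torch.where(onehot, psi, cos_theta)
print(ce(logits, labels).item())
```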
5,764
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.TDNNLayer
import warnings
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging
import torch
import torch.nn as nn


class TDNNLayer(nn.Module):

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
        self.out_conv_dim = config.tdnn_dim[layer_id]
        self.kernel_size = config.tdnn_kernel[layer_id]
        self.dilation = config.tdnn_dilation[layer_id]
        self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
        self.activation = nn.ReLU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if is_peft_available():
            from peft.tuners.lora import LoraLayer
        if is_peft_available():
            if isinstance(self.kernel, LoraLayer):
                warnings.warn("Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. You should exclude TDNNLayer from LoRA's target modules.")
        hidden_states = hidden_states.transpose(1, 2)
        weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
        hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.activation(hidden_states)
        return hidden_states

class TDNNLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
14
2
11
1
3
0.04
1
2
0
0
2
6
2
12
29
5
23
11
19
1
20
11
16
3
1
2
5
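`TDNNLayer.forward` above applies its `nn.Linear` kernel as a dilated 1-D convolution by reshaping the weight. A small sketch of that reshaping with hypothetical dimensions:

```python
import torch
import torch.nn as nn

# Hypothetical dimensions for illustration.
in_dim, out_dim, kernel_size, dilation = 8, 6, 3, 2
kernel = nn.Linear(in_dim * kernel_size, out_dim)

# (batch, time, channels) -> (batch, channels, time) for conv1d.
x = torch.randn(2, 20, in_dim).transpose(1, 2)

# Reinterpret the Linear weight as a conv1d kernel, as the TDNNLayer forward does.
weight = kernel.weight.view(out_dim, kernel_size, in_dim).transpose(1, 2)
y = nn.functional.conv1d(x, weight, kernel.bias, dilation=dilation)
print(y.shape)  # (2, 6, 16): time shrinks by (kernel_size - 1) * dilation
```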
5,765
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatAttention
import torch from ...processing_utils import Unpack from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from .configuration_unispeech_sat import UniSpeechSatConfig from typing import Callable, Optional, Union from ...modeling_flash_attention_utils import FlashAttentionKwargs import torch.nn as nn class UniSpeechSatAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[UniSpeechSatConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) current_states = key_value_states if is_cross_attention else hidden_states key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights, None)
class UniSpeechSatAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[UniSpeechSatConfig]=None): pass def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
3
2
50
7
35
8
5
0.24
1
7
1
2
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
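The attention module above dispatches to a backend chosen via `config._attn_implementation`; its default eager path amounts to plain scaled dot-product attention. A rough sketch of that computation with toy shapes (all sizes hypothetical, masks and dropout omitted):

```python
import torch

# Toy shapes mirroring the attention module above (hypothetical values).
bsz, num_heads, tgt_len, head_dim = 2, 4, 10, 16
scaling = head_dim ** -0.5

q = torch.randn(bsz, num_heads, tgt_len, head_dim)
k = torch.randn(bsz, num_heads, tgt_len, head_dim)
v = torch.randn(bsz, num_heads, tgt_len, head_dim)

# Plain scaled dot-product attention, the eager fallback the module dispatches to.
attn_weights = torch.softmax(torch.matmul(q, k.transpose(-1, -2)) * scaling, dim=-1)
attn_output = torch.matmul(attn_weights, v)                            # (bsz, heads, tgt_len, head_dim)
attn_output = attn_output.transpose(1, 2).reshape(bsz, tgt_len, -1)    # back to (bsz, tgt_len, embed_dim)
```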
5,766
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatAttnAdapterLayer
import torch
import torch.nn as nn


class UniSpeechSatAttnAdapterLayer(nn.Module):

    def __init__(self, config):
        """
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        """
        super().__init__()
        self.input_dim = config.adapter_attn_dim
        self.hidden_dim = config.hidden_size
        self.norm = nn.LayerNorm(self.hidden_dim)
        self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
        self.act_fn = nn.ReLU()
        self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)

    def forward(self, hidden_states: torch.FloatTensor):
        hidden_states = self.norm(hidden_states)
        hidden_states = self.linear_1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states

class UniSpeechSatAttnAdapterLayer(nn.Module):
    def __init__(self, config):
        '''
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        '''
        pass
    def forward(self, hidden_states: torch.FloatTensor):
        pass
3
1
11
2
7
2
1
0.27
1
1
0
0
2
6
2
12
23
4
15
9
12
4
15
9
12
1
1
0
2
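A possible usage sketch for the bottleneck adapter above; the config is a stand-in `SimpleNamespace` carrying only the two fields the layer reads, and the import assumes the module from the record's `file_path` is installed and importable:

```python
import torch
from types import SimpleNamespace
from transformers.models.unispeech_sat.modeling_unispeech_sat import UniSpeechSatAttnAdapterLayer

# Stand-in config with only the fields the adapter layer reads (values are arbitrary).
config = SimpleNamespace(adapter_attn_dim=16, hidden_size=64)

adapter = UniSpeechSatAttnAdapterLayer(config)
out = adapter(torch.randn(2, 10, config.hidden_size))  # LayerNorm -> down-project -> ReLU -> up-project
print(out.shape)  # torch.Size([2, 10, 64])
```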
5,767
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatEncoder
import torch from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from ...integrations.deepspeed import is_deepspeed_zero3_enabled from typing import Callable, Optional, Union import torch.nn as nn from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa class UniSpeechSatEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechSatEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = self._update_full_mask(attention_mask, hidden_states) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask
class UniSpeechSatEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass
4
0
41
5
33
3
8
0.07
1
8
3
0
2
7
2
12
83
11
67
26
57
5
45
19
42
15
1
3
16
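The encoder forward above uses LayerDrop: during training each layer is skipped with probability `config.layerdrop` unless GPUs must stay in sync. A toy sketch of just that skipping rule (the 0.1 probability and 12 layers are assumptions matching the defaults in the config record earlier):

```python
import torch

layerdrop, training, num_layers = 0.1, True, 12
kept = 0
for _ in range(num_layers):
    # Same test as in the encoder loop: draw once per layer, skip only while training.
    skip_the_layer = training and torch.rand([]) < layerdrop
    if not skip_the_layer:
        kept += 1
print(f"kept {kept} of {num_layers} layers on this forward pass")
```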
5,768
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatEncoderLayer
import torch.nn as nn from ...modeling_layers import GradientCheckpointingLayer class UniSpeechSatEncoderLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class UniSpeechSatEncoderLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False): pass
3
0
16
3
13
0
2
0
1
2
1
0
2
5
2
12
33
6
27
11
24
0
20
11
17
2
1
1
3
5,769
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatEncoderLayerStableLayerNorm
from ...modeling_layers import GradientCheckpointingLayer import torch.nn as nn import torch from typing import Callable, Optional, Union class UniSpeechSatEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, 'adapter_attn_dim', None) is not None: self.adapter_layer = UniSpeechSatAttnAdapterLayer(config) else: self.adapter_layer = None def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class UniSpeechSatEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False): pass
3
0
21
3
18
0
3
0
1
5
2
0
2
6
2
12
43
6
37
17
29
0
24
12
21
3
1
1
5
5,770
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatEncoderStableLayerNorm
import torch from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from ...integrations.deepspeed import is_deepspeed_zero3_enabled from typing import Callable, Optional, Union import torch.nn as nn from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa class UniSpeechSatEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechSatEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = self._update_full_mask(attention_mask, hidden_states) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask
class UniSpeechSatEncoderStableLayerNorm(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass
4
0
43
6
34
3
8
0.09
1
6
3
0
2
7
2
12
87
12
69
27
59
6
45
19
42
15
1
3
16
5,771
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatFeatureEncoder
import torch.nn as nn


class UniSpeechSatFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()
        if config.feat_extract_norm == 'group':
            conv_layers = [UniSpeechSatGroupNormConvLayer(config, layer_id=0)] + [
                UniSpeechSatNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == 'layer':
            conv_layers = [
                UniSpeechSatLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        hidden_states = input_values[:, None]
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True
        for conv_layer in self.conv_layers:
            hidden_states = conv_layer(hidden_states)
        return hidden_states

class UniSpeechSatFeatureEncoder(nn.Module):
    '''Construct the features from raw audio waveform'''
    def __init__(self, config):
        pass
    def _freeze_parameters(self):
        pass
    def forward(self, input_values):
        pass
4
1
13
1
12
0
3
0.06
1
6
3
1
3
3
3
13
45
7
36
11
32
2
23
11
19
4
1
2
9
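The conv stack in the feature encoder above shortens the time axis layer by layer. Using the standard conv output-length formula with the default kernels and strides from the config record earlier, a 1-second 16 kHz clip comes out to 49 frames; this sketch only illustrates that arithmetic and is not a method of the class:

```python
# Default kernels/strides from the UniSpeechSatConfig record earlier (assumed here).
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

length = 16000  # 1 second of audio at 16 kHz
for kernel, stride in zip(conv_kernel, conv_stride):
    length = (length - kernel) // stride + 1  # standard conv output-length formula
print(length)  # 49 frames, roughly one every 20 ms
```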
5,772
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatFeatureProjection
import torch.nn as nn


class UniSpeechSatFeatureProjection(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return (hidden_states, norm_hidden_states)

class UniSpeechSatFeatureProjection(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states):
        pass
3
0
6
0
5
1
1
0.09
1
1
0
0
2
3
2
12
13
1
11
7
8
1
11
7
8
1
1
0
2
5,773
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatFeedForward
from ...activations import ACT2FN
import torch.nn as nn


class UniSpeechSatFeedForward(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)
        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)
        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states

class UniSpeechSatFeedForward(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states):
        pass
3
0
10
2
9
0
2
0
1
2
0
0
2
5
2
12
22
4
18
8
15
0
17
8
14
2
1
1
3
5,774
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForAudioFrameClassification
import warnings import torch from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from typing import Callable, Optional, Union from torch.nn import CrossEntropyLoss import torch.nn as nn @auto_docstring class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, 'add_adapter') and config.add_adapter: raise ValueError('Audio frame classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)') self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech_sat.parameters(): param.requires_grad = False @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: """ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_base_model(self): ''' Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: ''' input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
8
4
18
2
13
4
3
0.26
1
7
2
0
5
4
5
8
104
13
73
28
51
19
39
19
33
6
2
1
13
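When `config.use_weighted_layer_sum` is true, the forward above softmax-normalizes a learned weight per hidden state and sums the stacked states. A sketch of that pooling with made-up sizes:

```python
import torch
import torch.nn as nn

# Hypothetical sizes: 13 hidden states (12 layers + embeddings), batch 2, 50 frames.
num_layers, batch, frames, hidden = 13, 2, 50, 32
hidden_states = torch.randn(batch, num_layers, frames, hidden)  # stacked along dim=1

layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
norm_weights = nn.functional.softmax(layer_weights, dim=-1)
pooled = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)  # (batch, frames, hidden)
```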
5,775
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForCTC
import warnings import torch from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from typing import Callable, Optional, Union import torch.nn as nn @auto_docstring(custom_intro='\n UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ') class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel): def __init__(self, config, target_lang: Optional[str]=None): """ target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechSatForCTC`] with adapters. Uses 'eng' by default. """ super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `UniSpeechSatForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.") output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ target_lang = self.target_lang if target_lang is not None and getattr(self.config, 'adapter_attn_dim', None) is None: raise ValueError(f'Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.') elif target_lang is None and getattr(self.config, 'adapter_attn_dim', None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.unispeech_sat.parameters(): param.requires_grad = False @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}') outputs = self.unispeech_sat(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ') class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel): def __init__(self, config, target_lang: Optional[str]=None): ''' target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechSatForCTC`] with adapters. Uses 'eng' by default. ''' pass def tie_weights(self): ''' This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. ''' pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_base_model(self): ''' Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. ''' pass
9
6
23
3
14
6
3
0.35
1
8
2
0
6
4
6
9
149
22
94
33
71
33
47
24
40
7
2
2
18
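The loss branch of `UniSpeechSatForCTC.forward` above boils down to one `ctc_loss` call on time-major log-probabilities. A minimal sketch with invented sizes and token ids; the real model derives `input_lengths` from the attention mask and `target_lengths` from the `-100` label mask:

```python
import torch
import torch.nn as nn

# Hypothetical sizes and ids for illustration only.
batch, frames, vocab_size, pad_token_id = 2, 40, 32, 0
logits = torch.randn(batch, frames, vocab_size)
labels = torch.randint(1, vocab_size, (batch, 12))        # non-blank token ids
input_lengths = torch.full((batch,), frames, dtype=torch.long)
target_lengths = torch.full((batch,), 12, dtype=torch.long)

log_probs = nn.functional.log_softmax(logits, dim=-1).transpose(0, 1)  # (T, N, C)
loss = nn.functional.ctc_loss(
    log_probs, labels, input_lengths, target_lengths,
    blank=pad_token_id, reduction="mean", zero_infinity=False,
)
```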
5,776
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTraining
import warnings from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging import torch from .configuration_unispeech_sat import UniSpeechSatConfig from typing import Callable, Optional, Union import torch.nn as nn @auto_docstring(custom_intro='\n UniSpeechSat Model with a vector-quantization module and ctc loss for pre-training.\n ') class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) self.dropout_features = nn.Dropout(config.feat_quantizer_dropout) self.quantizer = UniSpeechSatGumbelVectorQuantizer(config) self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim) self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim) self.dropout = nn.Dropout(config.final_dropout) self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim) self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim)) self.label_embeddings_concat.data.zero_() self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if self.config.do_stable_layer_norm: self.layer_norm_for_extract.requires_grad = False self.post_init() def set_gumbel_temperature(self, temperature: int): """ Set the Gumbel softmax temperature to a given value. Only necessary for training """ self.quantizer.temperature = temperature def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() @staticmethod def compute_contrastive_logits(target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int=1): """ Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. 
""" target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1) logits = logits.type_as(target_features) logits = logits / temperature return logits @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechSatForPreTrainingOutput]: """ Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining >>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base") >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base") >>> # TODO: Add full pretraining example ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech_sat(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) transformer_features = outputs[0] extract_features = self.dropout_features(outputs[1]) logits = extract_features loss = quantized_features = codevector_perplexity = None if not return_dict: if loss is not None: return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return UniSpeechSatForPreTrainingOutput(loss=loss, logits=logits, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeechSat Model with a vector-quantization module and ctc loss for pre-training.\n ') class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): pass def set_gumbel_temperature(self, temperature: int): ''' Set the Gumbel softmax temperature to a given value. Only necessary for training ''' pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass @staticmethod def compute_contrastive_logits(target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int=1): ''' Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechSatForPreTrainingOutput]: ''' Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining >>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base") >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base") >>> # TODO: Add full pretraining example ```''' pass
10
5
21
3
12
7
2
0.54
1
9
4
0
5
9
6
9
135
21
74
36
52
40
41
22
34
4
2
2
10
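`compute_contrastive_logits` above concatenates the positive with the negatives and scores the prediction against all of them with cosine similarity. A standalone sketch of that scoring with toy shapes (all sizes hypothetical):

```python
import torch

# Toy shapes: 1 positive + 5 negatives per position, batch 2, 10 frames, 16-dim features.
num_negatives, batch, seq, dim, temperature = 5, 2, 10, 16, 0.1
target_features = torch.randn(1, batch, seq, dim)               # positives
negative_features = torch.randn(num_negatives, batch, seq, dim)
predicted_features = torch.randn(1, batch, seq, dim)

candidates = torch.cat([target_features, negative_features], dim=0)
logits = torch.cosine_similarity(predicted_features.float(), candidates.float(), dim=-1)
logits = logits / temperature                                   # (1 + num_negatives, batch, seq)
print(logits.shape)
```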
5,777
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput
import torch from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from typing import Callable, Optional, Union from dataclasses import dataclass import torch.nn as nn @dataclass @auto_docstring(custom_intro='\n Output type of [`UniSpeechSatForPreTrainingOutput`], with potential hidden states and attentions.\n ') class UniSpeechSatForPreTrainingOutput(ModelOutput): """ loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*): Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None projected_states: Optional[torch.FloatTensor] = None projected_quantized_states: Optional[torch.FloatTensor] = None codevector_perplexity: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`UniSpeechSatForPreTrainingOutput`], with potential hidden states and attentions.\n ') class UniSpeechSatForPreTrainingOutput(ModelOutput): ''' loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*): Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. ''' pass
3
1
0
0
0
0
0
2.75
1
0
0
0
0
0
0
0
34
4
8
8
7
22
8
8
7
0
1
0
0
5,778
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForSequenceClassification
import warnings import torch from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from typing import Callable, Optional, Union from torch.nn import CrossEntropyLoss import torch.nn as nn @auto_docstring(custom_intro='\n UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ') class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, 'add_adapter') and config.add_adapter: raise ValueError('Sequence classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)') self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech_sat.parameters(): param.requires_grad = False @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, SequenceClassifierOutput]: """ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ') class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_base_model(self): ''' Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, SequenceClassifierOutput]: ''' input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
8
4
20
2
14
4
3
0.3
1
7
2
0
5
4
5
8
117
14
80
31
59
24
46
22
40
8
2
1
15
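The pooling step in the sequence-classification forward above zeroes padded frames and averages over the valid ones. A small sketch of that masked mean with an invented frame mask:

```python
import torch

# Hypothetical sizes and mask for illustration.
batch, frames, hidden = 2, 6, 4
hidden_states = torch.randn(batch, frames, hidden)
padding_mask = torch.tensor([[1, 1, 1, 1, 0, 0],
                             [1, 1, 1, 1, 1, 1]]).bool()

expanded = padding_mask.unsqueeze(-1).repeat(1, 1, hidden)
hidden_states[~expanded] = 0.0
pooled = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)  # (batch, hidden)
```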
5,779
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForXVector
import warnings import torch from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, ModelOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput from typing import Callable, Optional, Union import torch.nn as nn @auto_docstring(custom_intro='\n UniSpeechSat Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n ') class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech_sat.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, XVectorOutput]: """ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. 
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return XVectorOutput(loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UniSpeechSat Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n ') class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel): def __init__(self, config): pass def freeze_feature_extractor(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_feature_encoder(self): ''' Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. ''' pass def freeze_base_model(self): ''' Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. ''' pass def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): ''' Computes the output length of the TDNN layers ''' pass def _conv_out_length(input_length, kernel_size, stride): pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, XVectorOutput]: ''' input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
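As a quick illustration of how the class above is typically driven, here is a hedged usage sketch for speaker verification. The checkpoint name `microsoft/unispeech-sat-base-plus-sv` and the random waveforms are illustrative placeholders, not values taken from this record.

```python
import torch
from transformers import AutoFeatureExtractor, UniSpeechSatForXVector

checkpoint = "microsoft/unispeech-sat-base-plus-sv"  # assumed example checkpoint
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = UniSpeechSatForXVector.from_pretrained(checkpoint)

# Two fake one-second mono waveforms at 16 kHz stand in for real recordings.
waveforms = [torch.randn(16000).numpy(), torch.randn(16000).numpy()]
inputs = feature_extractor(waveforms, sampling_rate=16000, padding=True, return_tensors="pt")

with torch.no_grad():
    embeddings = model(**inputs).embeddings  # (batch, xvector_output_dim)

# Speaker similarity is usually scored with cosine similarity over the x-vectors.
similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
print(similarity.item())
```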
10
5
19
3
13
4
3
0.26
1
11
4
0
6
7
6
9
144
23
97
42
73
25
63
33
55
10
2
2
19
5,780
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatGroupNormConvLayer
from ...activations import ACT2FN
import torch.nn as nn
from ...modeling_layers import GradientCheckpointingLayer

class UniSpeechSatGroupNormConvLayer(GradientCheckpointingLayer):

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.activation = ACT2FN[config.feat_extract_activation]
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class UniSpeechSatGroupNormConvLayer(GradientCheckpointingLayer):

    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
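A minimal shape-check sketch for the conv layer above, assuming a default `UniSpeechSatConfig` (where `conv_dim[0]=512`, kernel 10, stride 5) and the module path from this record's file path; all values are illustrative only. The two sibling conv layers later in this file (`UniSpeechSatLayerNormConvLayer` and `UniSpeechSatNoLayerNormConvLayer`) accept the same shapes and differ only in their normalization.

```python
import torch
from transformers import UniSpeechSatConfig
from transformers.models.unispeech_sat.modeling_unispeech_sat import UniSpeechSatGroupNormConvLayer

config = UniSpeechSatConfig()
layer = UniSpeechSatGroupNormConvLayer(config, layer_id=0)  # first feature-encoder layer

raw_audio = torch.randn(2, 1, 16000)  # (batch, channels=1, samples)
features = layer(raw_audio)
print(features.shape)  # (2, config.conv_dim[0], (16000 - kernel) // stride + 1) with defaults
```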
3
0
10
1
9
0
2
0
1
1
0
0
2
5
2
12
22
3
19
8
16
0
13
8
10
2
1
0
3
5,781
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatGumbelVectorQuantizer
import torch
import torch.nn as nn

class UniSpeechSatGumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group
        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(f'`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups` {self.num_groups} for concatenation')
        self.codevectors = nn.Parameter(torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups))
        self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars)
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs, mask=None):
        marginal_probs = probs.mean(dim=0)
        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-07), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states):
        batch_size, sequence_length, hidden_size = hidden_states.shape
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
        if self.training:
            codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True).type_as(hidden_states)
            codevector_soft_dist = torch.softmax(hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1)
            perplexity = self._compute_perplexity(codevector_soft_dist)
        else:
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(-1, codevector_idx.view(-1, 1), 1.0)
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
            perplexity = self._compute_perplexity(codevector_probs)
        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
        return (codevectors, perplexity)
class UniSpeechSatGumbelVectorQuantizer(nn.Module):
    '''
    Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
    '''

    def __init__(self, config):
        pass

    @staticmethod
    def _compute_perplexity(probs, mask=None):
        pass

    def forward(self, hidden_states):
        pass
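To make the shape flow of the quantizer above concrete, here is a hedged sketch with randomly initialized weights and a default `UniSpeechSatConfig` (2 groups × 320 codevectors, `codevector_dim=256`); it demonstrates tensor shapes only, not trained behaviour.

```python
import torch
from transformers import UniSpeechSatConfig
from transformers.models.unispeech_sat.modeling_unispeech_sat import UniSpeechSatGumbelVectorQuantizer

config = UniSpeechSatConfig()
quantizer = UniSpeechSatGumbelVectorQuantizer(config)
quantizer.eval()  # eval mode takes the hard-argmax branch instead of Gumbel sampling

hidden_states = torch.randn(2, 50, config.hidden_size)  # (batch, frames, hidden_size)
codevectors, perplexity = quantizer(hidden_states)
print(codevectors.shape)  # (2, 50, config.codevector_dim)
print(perplexity.item())  # scalar measuring codebook usage diversity
```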
5
1
20
3
14
3
2
0.27
1
2
0
0
2
5
3
13
68
12
44
19
39
12
31
18
27
2
1
1
5
5,782
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatLayerNormConvLayer
from ...activations import ACT2FN
import torch.nn as nn
from ...modeling_layers import GradientCheckpointingLayer

class UniSpeechSatLayerNormConvLayer(GradientCheckpointingLayer):

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class UniSpeechSatLayerNormConvLayer(GradientCheckpointingLayer):

    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
3
0
12
2
10
0
2
0
1
1
0
0
2
5
2
12
25
4
21
8
18
0
15
8
12
2
1
0
3
5,783
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatModel
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging import torch from .configuration_unispeech_sat import UniSpeechSatConfig from typing import Callable, Optional, Union import torch.nn as nn @auto_docstring class UniSpeechSatModel(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): super().__init__(config) self.config = config self.feature_extractor = UniSpeechSatFeatureEncoder(config) self.feature_projection = UniSpeechSatFeatureProjection(config) self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = UniSpeechSatEncoderStableLayerNorm(config) else: self.encoder = UniSpeechSatEncoder(config) self.post_init() def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). """ if not getattr(self.config, 'apply_spec_augment', True): return hidden_states batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechSatBaseModelOutput]: """ mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask) encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return UniSpeechSatBaseModelOutput(last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
@auto_docstring class UniSpeechSatModel(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): pass def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None): ''' Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). ''' pass @auto_docstring def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, UniSpeechSatBaseModelOutput]: ''' mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. ''' pass
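For orientation, a hedged end-to-end sketch of the base model above; the checkpoint name and the synthetic waveform are placeholders rather than values taken from this record.

```python
import torch
from transformers import AutoFeatureExtractor, UniSpeechSatModel

checkpoint = "microsoft/unispeech-sat-base"  # assumed example checkpoint
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = UniSpeechSatModel.from_pretrained(checkpoint)

waveform = torch.randn(16000).numpy()  # one second of fake 16 kHz mono audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, num_frames, hidden_size)
print(outputs.extract_features.shape)   # (1, num_frames, conv_dim[-1])
```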
6
2
35
5
27
3
4
0.12
1
9
6
0
3
5
3
6
118
17
90
28
65
11
41
14
37
6
2
1
13
5,784
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatNoLayerNormConvLayer
from ...activations import ACT2FN
import torch.nn as nn
from ...modeling_layers import GradientCheckpointingLayer

class UniSpeechSatNoLayerNormConvLayer(GradientCheckpointingLayer):

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class UniSpeechSatNoLayerNormConvLayer(GradientCheckpointingLayer):

    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
3
0
9
1
8
0
2
0
1
1
0
0
2
4
2
12
19
2
17
7
14
0
11
7
8
2
1
0
3
5,785
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatPositionalConvEmbedding
from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
import torch.nn as nn

class UniSpeechSatPositionalConvEmbedding(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups)
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, 'weight_norm'):
            weight_norm = nn.utils.parametrizations.weight_norm
        if is_deepspeed_zero3_enabled():
            import deepspeed
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name='weight', dim=2)
            if hasattr(self.conv, 'parametrizations'):
                weight_g = self.conv.parametrizations.weight.original0
                weight_v = self.conv.parametrizations.weight.original1
            else:
                weight_g = self.conv.weight_g
                weight_v = self.conv.weight_v
            deepspeed.zero.register_external_parameter(self, weight_v)
            deepspeed.zero.register_external_parameter(self, weight_g)
        else:
            self.conv = weight_norm(self.conv, name='weight', dim=2)
        self.padding = UniSpeechSatSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
class UniSpeechSatPositionalConvEmbedding(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
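A hedged shape-preservation sketch for the positional convolution above, assuming a default `UniSpeechSatConfig` (a 128-tap grouped conv over `hidden_size=768`); with an even kernel, the `UniSpeechSatSamePadLayer` trims the single extra frame so the sequence length is unchanged.

```python
import torch
from transformers import UniSpeechSatConfig
from transformers.models.unispeech_sat.modeling_unispeech_sat import UniSpeechSatPositionalConvEmbedding

config = UniSpeechSatConfig()
pos_conv = UniSpeechSatPositionalConvEmbedding(config)

hidden_states = torch.randn(2, 49, config.hidden_size)  # (batch, frames, hidden_size)
pos_embeddings = pos_conv(hidden_states)
print(pos_embeddings.shape)  # (2, 49, 768): conv padding plus SamePadLayer keep the frame count
```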
3
0
21
3
18
0
3
0
1
2
1
0
2
3
2
12
43
7
36
10
32
0
28
10
24
4
1
2
5
5,786
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatPreTrainedModel
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_unispeech_sat import UniSpeechSatConfig
from typing import Callable, Optional, Union
import torch.nn as nn
import torch
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging
import math

@auto_docstring
class UniSpeechSatPreTrainedModel(PreTrainedModel):
    config: UniSpeechSatConfig
    base_model_prefix = 'unispeech_sat'
    main_input_name = 'input_values'
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, UniSpeechSatGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, UniSpeechSatPositionalConvEmbedding):
            nn.init.normal_(module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)))
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, UniSpeechSatFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            return torch.div(input_length - kernel_size, stride, rounding_mode='floor') + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device)
        attention_mask[torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
@auto_docstring
class UniSpeechSatPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        '''
        Computes the output length of the convolutional layers
        '''
        pass

    def _conv_out_length(input_length, kernel_size, stride):
        pass

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        pass
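The `_get_feat_extract_output_lengths` helper above reduces a raw sample count to a frame count by folding the per-layer conv formula. Below is a hedged, dependency-free restatement of that arithmetic; the kernel/stride values are the `UniSpeechSatConfig` defaults and are used only for illustration.

```python
conv_kernel = (10, 3, 3, 3, 3, 2, 2)  # assumed default kernels
conv_stride = (5, 2, 2, 2, 2, 2, 2)   # assumed default strides

def feat_extract_output_length(input_length: int) -> int:
    # Same recurrence as the model: L_out = (L_in - kernel) // stride + 1 per conv layer.
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        input_length = (input_length - kernel_size) // stride + 1
    return input_length

print(feat_extract_output_length(16000))  # 49 frames for one second of 16 kHz audio
```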
6
2
16
2
12
3
3
0.27
1
5
3
6
3
0
3
3
75
10
51
16
46
14
40
16
35
9
1
2
13
5,787
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
transformers.models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatSamePadLayer
import torch.nn as nn

class UniSpeechSatSamePadLayer(nn.Module):

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove > 0:
            hidden_states = hidden_states[:, :, :-self.num_pad_remove]
        return hidden_states
class UniSpeechSatSamePadLayer(nn.Module):

    def __init__(self, num_conv_pos_embeddings):
        pass

    def forward(self, hidden_states):
        pass
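A hedged micro-example of the trim behaviour above: with an even number of positional conv embeddings, exactly one trailing frame is removed; the module path is assumed from this record's file path.

```python
import torch
from transformers.models.unispeech_sat.modeling_unispeech_sat import UniSpeechSatSamePadLayer

pad_layer = UniSpeechSatSamePadLayer(num_conv_pos_embeddings=128)  # even -> remove one frame
hidden_states = torch.randn(2, 768, 50)
print(pad_layer(hidden_states).shape)  # torch.Size([2, 768, 49])
```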
3
0
4
0
4
0
2
0
1
1
0
0
2
1
2
12
9
1
8
4
5
0
8
4
5
2
1
1
4
5,788
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/configuration_univnet.py
transformers.models.univnet.configuration_univnet.UnivNetConfig
from ...configuration_utils import PretrainedConfig class UnivNetConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a UnivNet vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UnivNet [dg845/univnet-dev](https://huggingface.co/dg845/univnet-dev) architecture, which corresponds to the 'c32' architecture in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/master/config/default_c32.yaml). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_channels (`int`, *optional*, defaults to 64): The number of input channels for the UnivNet residual network. This should correspond to `noise_sequence.shape[1]` and the value used in the [`UnivNetFeatureExtractor`] class. model_hidden_channels (`int`, *optional*, defaults to 32): The number of hidden channels of each residual block in the UnivNet residual network. num_mel_bins (`int`, *optional*, defaults to 100): The number of frequency bins in the conditioning log-mel spectrogram. This should correspond to the value used in the [`UnivNetFeatureExtractor`] class. resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 3, 3]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_kernel_sizes` defines the number of resnet blocks and should match that of `resblock_stride_sizes` and `resblock_dilation_sizes`. resblock_stride_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 4]`): A tuple of integers defining the stride sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_stride_sizes` should match that of `resblock_kernel_sizes` and `resblock_dilation_sizes`. resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the UnivNet residual network. The length of `resblock_dilation_sizes` should match that of `resblock_kernel_sizes` and `resblock_stride_sizes`. The length of each nested list in `resblock_dilation_sizes` defines the number of convolutional layers per resnet block. kernel_predictor_num_blocks (`int`, *optional*, defaults to 3): The number of residual blocks in the kernel predictor network, which calculates the kernel and bias for each location variable convolution layer in the UnivNet residual network. kernel_predictor_hidden_channels (`int`, *optional*, defaults to 64): The number of hidden channels for each residual block in the kernel predictor network. kernel_predictor_conv_size (`int`, *optional*, defaults to 3): The kernel size of each 1D convolutional layer in the kernel predictor network. kernel_predictor_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for each residual block in the kernel predictor network. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.2): The angle of the negative slope used by the leaky ReLU activation. 
Example: ```python >>> from transformers import UnivNetModel, UnivNetConfig >>> # Initializing a Tortoise TTS style configuration >>> configuration = UnivNetConfig() >>> # Initializing a model (with random weights) from the Tortoise TTS style configuration >>> model = UnivNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = 'univnet' def __init__(self, model_in_channels=64, model_hidden_channels=32, num_mel_bins=100, resblock_kernel_sizes=[3, 3, 3], resblock_stride_sizes=[8, 8, 4], resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]], kernel_predictor_num_blocks=3, kernel_predictor_hidden_channels=64, kernel_predictor_conv_size=3, kernel_predictor_dropout=0.0, initializer_range=0.01, leaky_relu_slope=0.2, **kwargs): if not len(resblock_kernel_sizes) == len(resblock_stride_sizes) == len(resblock_dilation_sizes): raise ValueError('`resblock_kernel_sizes`, `resblock_stride_sizes`, and `resblock_dilation_sizes` must all have the same length (which will be the number of resnet blocks in the model).') self.model_in_channels = model_in_channels self.model_hidden_channels = model_hidden_channels self.num_mel_bins = num_mel_bins self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_stride_sizes = resblock_stride_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.kernel_predictor_num_blocks = kernel_predictor_num_blocks self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels self.kernel_predictor_conv_size = kernel_predictor_conv_size self.kernel_predictor_dropout = kernel_predictor_dropout self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope super().__init__(**kwargs)
class UnivNetConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a UnivNet vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UnivNet [dg845/univnet-dev](https://huggingface.co/dg845/univnet-dev) architecture, which corresponds to the 'c32' architecture in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/master/config/default_c32.yaml). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_channels (`int`, *optional*, defaults to 64): The number of input channels for the UnivNet residual network. This should correspond to `noise_sequence.shape[1]` and the value used in the [`UnivNetFeatureExtractor`] class. model_hidden_channels (`int`, *optional*, defaults to 32): The number of hidden channels of each residual block in the UnivNet residual network. num_mel_bins (`int`, *optional*, defaults to 100): The number of frequency bins in the conditioning log-mel spectrogram. This should correspond to the value used in the [`UnivNetFeatureExtractor`] class. resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 3, 3]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_kernel_sizes` defines the number of resnet blocks and should match that of `resblock_stride_sizes` and `resblock_dilation_sizes`. resblock_stride_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 4]`): A tuple of integers defining the stride sizes of the 1D convolutional layers in the UnivNet residual network. The length of `resblock_stride_sizes` should match that of `resblock_kernel_sizes` and `resblock_dilation_sizes`. resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the UnivNet residual network. The length of `resblock_dilation_sizes` should match that of `resblock_kernel_sizes` and `resblock_stride_sizes`. The length of each nested list in `resblock_dilation_sizes` defines the number of convolutional layers per resnet block. kernel_predictor_num_blocks (`int`, *optional*, defaults to 3): The number of residual blocks in the kernel predictor network, which calculates the kernel and bias for each location variable convolution layer in the UnivNet residual network. kernel_predictor_hidden_channels (`int`, *optional*, defaults to 64): The number of hidden channels for each residual block in the kernel predictor network. kernel_predictor_conv_size (`int`, *optional*, defaults to 3): The kernel size of each 1D convolutional layer in the kernel predictor network. kernel_predictor_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for each residual block in the kernel predictor network. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.2): The angle of the negative slope used by the leaky ReLU activation. 
Example: ```python >>> from transformers import UnivNetModel, UnivNetConfig >>> # Initializing a Tortoise TTS style configuration >>> configuration = UnivNetConfig() >>> # Initializing a model (with random weights) from the Tortoise TTS style configuration >>> model = UnivNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` ''' def __init__(self, model_in_channels=64, model_hidden_channels=32, num_mel_bins=100, resblock_kernel_sizes=[3, 3, 3], resblock_stride_sizes=[8, 8, 4], resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]], kernel_predictor_num_blocks=3, kernel_predictor_hidden_channels=64, kernel_predictor_conv_size=3, kernel_predictor_dropout=0.0, initializer_range=0.01, leaky_relu_slope=0.2, **kwargs): pass
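Beyond the construction example already shown in the docstring, here is a hedged sketch of the length-consistency check enforced in `__init__` above: the three resblock lists must have equal length (one entry per resnet block), otherwise a `ValueError` is raised.

```python
from transformers import UnivNetConfig

# Two resnet blocks: all three lists have matching length, so this succeeds.
config = UnivNetConfig(
    resblock_kernel_sizes=[3, 3],
    resblock_stride_sizes=[8, 8],
    resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27]],
)
print(len(config.resblock_kernel_sizes))  # 2

# Mismatched lengths (3 kernels vs. 2 strides vs. 3 default dilation lists) raise the ValueError shown above.
try:
    UnivNetConfig(resblock_kernel_sizes=[3, 3, 3], resblock_stride_sizes=[8, 8])
except ValueError as err:
    print(err)
```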
2
1
35
1
34
0
2
1.5
1
2
0
0
1
12
1
1
100
10
36
30
19
54
18
15
16
2
1
1
2
5,789
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/feature_extraction_univnet.py
transformers.models.univnet.feature_extraction_univnet.UnivNetFeatureExtractor
from typing import Any, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...utils import PaddingStrategy, TensorType, logging from ...feature_extraction_utils import BatchFeature from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function class UnivNetFeatureExtractor(SequenceFeatureExtractor): """ Constructs a UnivNet feature extractor. This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The STFT implementation follows that of TacoTron 2 and Hifi-GAN. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value to pad with when applying the padding strategy defined by the `padding` argument to [`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to `__call__` will also use this padding value. do_normalize (`bool`, *optional*, defaults to `False`): Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the performance for some models. num_mel_bins (`int`, *optional*, defaults to 100): The number of mel-frequency bins in the extracted spectrogram features. This should match `UnivNetModel.config.num_mel_bins`. hop_length (`int`, *optional*, defaults to 256): The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take the `hop_length` in ms. win_length (`int`, *optional*, defaults to 1024): The direct number of samples for each sliding window. Note that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms. win_function (`str`, *optional*, defaults to `"hann_window"`): Name for the window function used for windowing, must be accessible via `torch.{win_function}` filter_length (`int`, *optional*, defaults to 1024): The number of FFT components to use. If `None`, this is determined using `transformers.audio_utils.optimal_fft_length`. max_length_s (`int`, *optional*, defaults to 10): The maximum input length of the model in seconds. This is used to pad the audio. fmin (`float`, *optional*, defaults to 0.0): Minimum mel frequency in Hz. fmax (`float`, *optional*): Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`. mel_floor (`float`, *optional*, defaults to 1e-09): Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is different than in [`transformers.audio_utils.spectrogram`]. center (`bool`, *optional*, defaults to `False`): Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame `t` will start at time `t * hop_length`. compression_factor (`float`, *optional*, defaults to 1.0): The multiplicative compression factor for dynamic range compression during spectral normalization. 
compression_clip_val (`float`, *optional*, defaults to 1e-05): The clip value applied to the waveform before applying dynamic range compression during spectral normalization. normalize_min (`float`, *optional*, defaults to -11.512925148010254): The min value used for Tacotron 2-style linear normalization. The default is the original value from the Tacotron 2 implementation. normalize_max (`float`, *optional*, defaults to 2.3143386840820312): The max value used for Tacotron 2-style linear normalization. The default is the original value from the Tacotron 2 implementation. model_in_channels (`int`, *optional*, defaults to 64): The number of input channels to the [`UnivNetModel`] model. This should match `UnivNetModel.config.model_in_channels`. pad_end_length (`int`, *optional*, defaults to 10): If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The number of appended samples will be `pad_end_length * hop_length`. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`. """ model_input_names = ['input_features', 'noise_sequence', 'padding_mask'] def __init__(self, feature_size: int=1, sampling_rate: int=24000, padding_value: float=0.0, do_normalize: bool=False, num_mel_bins: int=100, hop_length: int=256, win_length: int=1024, win_function: str='hann_window', filter_length: Optional[int]=1024, max_length_s: int=10, fmin: float=0.0, fmax: Optional[float]=None, mel_floor: float=1e-09, center: bool=False, compression_factor: float=1.0, compression_clip_val: float=1e-05, normalize_min: float=-11.512925148010254, normalize_max: float=2.3143386840820312, model_in_channels: int=64, pad_end_length: int=10, return_attention_mask=True, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs) self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.filter_length = filter_length self.fmin = fmin if fmax is None: fmax = float(sampling_rate) / 2 self.fmax = fmax self.mel_floor = mel_floor self.max_length_s = max_length_s self.num_max_samples = max_length_s * sampling_rate if self.filter_length is None: self.n_fft = optimal_fft_length(self.win_length) else: self.n_fft = self.filter_length self.n_freqs = self.n_fft // 2 + 1 self.window = window_function(window_length=self.win_length, name=self.win_function, periodic=True) self.mel_filters = mel_filter_bank(num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney') self.center = center self.compression_factor = compression_factor self.compression_clip_val = compression_clip_val self.normalize_min = normalize_min self.normalize_max = normalize_max self.model_in_channels = model_in_channels self.pad_end_length = pad_end_length def normalize(self, spectrogram): return 2 * ((spectrogram - self.normalize_min) / (self.normalize_max - self.normalize_min)) - 1 def denormalize(self, spectrogram): return self.normalize_min + (self.normalize_max - self.normalize_min) * ((spectrogram + 1) / 2) def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray: """ Calculates log MEL spectrograms from a batch of waveforms. 
Note that the input waveform(s) will be padded by `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode. Args: waveform (`np.ndarray` of shape `(length,)`): The input waveform. This must be a single real-valued, mono waveform. Returns: `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`. """ waveform = np.pad(waveform, (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)), mode='reflect') complex_spectrogram = spectrogram(waveform, window=self.window, frame_length=self.n_fft, hop_length=self.hop_length, fft_length=self.n_fft, power=None, center=self.center, mel_filters=None, mel_floor=None) amplitude_spectrogram = np.sqrt(np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor) mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram) log_mel_spectrogram = np.log(np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor) return log_mel_spectrogram.T def generate_noise(self, noise_length: int, generator: Optional[np.random.Generator]=None) -> np.ndarray: """ Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of [`UnivNetModel.forward`]. Args: spectrogram_length (`int`): The length (dim 0) of the generated noise. model_in_channels (`int`, *optional*, defaults to `None`): The number of features (dim 1) of the generated noise. This should correspond to the `model_in_channels` of the [`UnivNetGan`] model. If not set, this will default to `self.config.model_in_channels`. generator (`numpy.random.Generator`, *optional*, defaults to `None`) An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a new generator with fresh entropy will be created. Returns: `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length, model_in_channels)`. """ if generator is None: generator = np.random.default_rng() noise_shape = (noise_length, self.model_in_channels) noise = generator.standard_normal(noise_shape, dtype=np.float32) return noise def batch_decode(self, waveforms, waveform_lengths=None) -> list[np.ndarray]: """ Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D audio waveform arrays and not a single tensor/array because in general the waveforms will have different lengths after removing padding. Args: waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): The batched output waveforms from the [`UnivNetModel`]. waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): The batched lengths of each waveform before padding. Returns: `list[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed. 
""" waveforms = [waveform.detach().to(device='cpu', copy=True).numpy() for waveform in waveforms] if waveform_lengths is not None: waveforms = [waveform[:waveform_lengths[i]] for i, waveform in enumerate(waveforms)] return waveforms def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Optional[int]=None, padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, truncation: bool=True, pad_to_multiple_of: Optional[int]=None, return_noise: bool=True, generator: Optional[np.random.Generator]=None, pad_end: bool=False, pad_length: Optional[int]=None, do_normalize: Optional[str]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). If `pad_end = True`, that padding will occur before the `padding` strategy is applied. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*, defaults to `True`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_noise (`bool`, *optional*, defaults to `True`): Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`]. generator (`numpy.random.Generator`, *optional*, defaults to `None`): An optional `numpy.random.Generator` random number generator to use when generating noise. pad_end (`bool`, *optional*, defaults to `False`): Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. This padding will be done before the padding strategy specified in `padding` is performed. pad_length (`int`, *optional*, defaults to `None`): If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this will default to `self.config.pad_end_length`. 
do_normalize (`bool`, *optional*): Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the performance for some models. If not set, this will default to `self.config.do_normalize`. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.np.array` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ do_normalize = do_normalize if do_normalize is not None else self.do_normalize if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) if not is_batched: raw_speech = [np.asarray(raw_speech, dtype=np.float32)] if pad_end: pad_length = pad_length if pad_length is not None else self.pad_end_length raw_speech = [np.pad(waveform, (0, pad_length * self.hop_length), constant_values=self.padding_value) for waveform in raw_speech] batched_speech = BatchFeature({'input_features': raw_speech}) padded_inputs = self.pad(batched_speech, padding=padding, max_length=max_length if max_length is not None else self.num_max_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask) input_features = padded_inputs.get('input_features') mel_spectrograms = [self.mel_spectrogram(waveform) for waveform in input_features] if isinstance(input_features[0], list): batched_speech['input_features'] = [np.asarray(mel, dtype=np.float32) for mel in mel_spectrograms] else: batched_speech['input_features'] = [mel.astype(np.float32) for mel in mel_spectrograms] attention_mask = padded_inputs.get('attention_mask') if attention_mask is not None: batched_speech['padding_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask] if return_noise: noise = [self.generate_noise(spectrogram.shape[0], generator) for spectrogram in batched_speech['input_features']] batched_speech['noise_sequence'] = noise if do_normalize: batched_speech['input_features'] = [self.normalize(spectrogram) for spectrogram in batched_speech['input_features']] if return_tensors is not None: batched_speech = 
batched_speech.convert_to_tensors(return_tensors) return batched_speech def to_dict(self) -> dict[str, Any]: output = super().to_dict() names = ['window', 'mel_filters', 'n_fft', 'n_freqs', 'num_max_samples'] for name in names: if name in output: del output[name] return output
class UnivNetFeatureExtractor(SequenceFeatureExtractor): ''' Constructs a UnivNet feature extractor. This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The STFT implementation follows that of TacoTron 2 and Hifi-GAN. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value to pad with when applying the padding strategy defined by the `padding` argument to [`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to `__call__` will also use this padding value. do_normalize (`bool`, *optional*, defaults to `False`): Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the performance for some models. num_mel_bins (`int`, *optional*, defaults to 100): The number of mel-frequency bins in the extracted spectrogram features. This should match `UnivNetModel.config.num_mel_bins`. hop_length (`int`, *optional*, defaults to 256): The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take the `hop_length` in ms. win_length (`int`, *optional*, defaults to 1024): The direct number of samples for each sliding window. Note that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms. win_function (`str`, *optional*, defaults to `"hann_window"`): Name for the window function used for windowing, must be accessible via `torch.{win_function}` filter_length (`int`, *optional*, defaults to 1024): The number of FFT components to use. If `None`, this is determined using `transformers.audio_utils.optimal_fft_length`. max_length_s (`int`, *optional*, defaults to 10): The maximum input length of the model in seconds. This is used to pad the audio. fmin (`float`, *optional*, defaults to 0.0): Minimum mel frequency in Hz. fmax (`float`, *optional*): Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`. mel_floor (`float`, *optional*, defaults to 1e-09): Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is different than in [`transformers.audio_utils.spectrogram`]. center (`bool`, *optional*, defaults to `False`): Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame `t` will start at time `t * hop_length`. compression_factor (`float`, *optional*, defaults to 1.0): The multiplicative compression factor for dynamic range compression during spectral normalization. compression_clip_val (`float`, *optional*, defaults to 1e-05): The clip value applied to the waveform before applying dynamic range compression during spectral normalization. normalize_min (`float`, *optional*, defaults to -11.512925148010254): The min value used for Tacotron 2-style linear normalization. The default is the original value from the Tacotron 2 implementation. 
normalize_max (`float`, *optional*, defaults to 2.3143386840820312): The max value used for Tacotron 2-style linear normalization. The default is the original value from the Tacotron 2 implementation. model_in_channels (`int`, *optional*, defaults to 64): The number of input channels to the [`UnivNetModel`] model. This should match `UnivNetModel.config.model_in_channels`. pad_end_length (`int`, *optional*, defaults to 10): If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The number of appended samples will be `pad_end_length * hop_length`. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`. ''' def __init__(self, feature_size: int=1, sampling_rate: int=24000, padding_value: float=0.0, do_normalize: bool=False, num_mel_bins: int=100, hop_length: int=256, win_length: int=1024, win_function: str='hann_window', filter_length: Optional[int]=1024, max_length_s: int=10, fmin: float=0.0, fmax: Optional[float]=None, mel_floor: float=1e-09, center: bool=False, compression_factor: float=1.0, compression_clip_val: float=1e-05, normalize_min: float=-11.512925148010254, normalize_max: float=2.3143386840820312, model_in_channels: int=64, pad_end_length: int=10, return_attention_mask=True, **kwargs): pass def normalize(self, spectrogram): pass def denormalize(self, spectrogram): pass def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray: ''' Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode. Args: waveform (`np.ndarray` of shape `(length,)`): The input waveform. This must be a single real-valued, mono waveform. Returns: `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`. ''' pass def generate_noise(self, noise_length: int, generator: Optional[np.random.Generator]=None) -> np.ndarray: ''' Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of [`UnivNetModel.forward`]. Args: spectrogram_length (`int`): The length (dim 0) of the generated noise. model_in_channels (`int`, *optional*, defaults to `None`): The number of features (dim 1) of the generated noise. This should correspond to the `model_in_channels` of the [`UnivNetGan`] model. If not set, this will default to `self.config.model_in_channels`. generator (`numpy.random.Generator`, *optional*, defaults to `None`) An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a new generator with fresh entropy will be created. Returns: `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length, model_in_channels)`. ''' pass def batch_decode(self, waveforms, waveform_lengths=None) -> list[np.ndarray]: ''' Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D audio waveform arrays and not a single tensor/array because in general the waveforms will have different lengths after removing padding. Args: waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): The batched output waveforms from the [`UnivNetModel`]. waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): The batched lengths of each waveform before padding. Returns: `list[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed. 
''' pass def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Optional[int]=None, padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, truncation: bool=True, pad_to_multiple_of: Optional[int]=None, return_noise: bool=True, generator: Optional[np.random.Generator]=None, pad_end: bool=False, pad_length: Optional[int]=None, do_normalize: Optional[str]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None) -> BatchFeature: ''' Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). If `pad_end = True`, that padding will occur before the `padding` strategy is applied. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*, defaults to `True`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_noise (`bool`, *optional*, defaults to `True`): Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`]. generator (`numpy.random.Generator`, *optional*, defaults to `None`): An optional `numpy.random.Generator` random number generator to use when generating noise. pad_end (`bool`, *optional*, defaults to `False`): Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. This padding will be done before the padding strategy specified in `padding` is performed. pad_length (`int`, *optional*, defaults to `None`): If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this will default to `self.config.pad_end_length`. do_normalize (`bool`, *optional*): Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the performance for some models. If not set, this will default to `self.config.do_normalize`. 
return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. ''' pass def to_dict(self) -> dict[str, Any]: pass
9
5
44
6
25
13
4
0.85
1
11
1
0
8
23
8
25
428
57
200
94
148
171
96
49
87
17
3
2
30
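The `UnivNetFeatureExtractor` record above documents a fairly involved `__call__` (padding, optional end-padding, noise generation). A minimal usage sketch under stated assumptions: all-default construction (valid per the `__init__` signature above), a synthetic mono waveform standing in for real speech, and output keys assumed to include `input_features` (matching `UnivNetModel.main_input_name` further down) and `noise_sequence`.

```python
import numpy as np
from transformers import UnivNetFeatureExtractor

# All arguments of __init__ have defaults, so a default extractor is constructible offline.
feature_extractor = UnivNetFeatureExtractor()

# One second of synthetic mono audio at the extractor's sampling rate (24 kHz by default).
waveform = np.random.randn(feature_extractor.sampling_rate).astype(np.float32)

inputs = feature_extractor(waveform, sampling_rate=feature_extractor.sampling_rate, return_tensors="np")

# The batch is assumed to contain at least "input_features" (log-mel frames) and,
# since return_noise defaults to True, a "noise_sequence" array.
print({key: value.shape for key, value in inputs.items()})
```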
5,790
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/modeling_univnet.py
transformers.models.univnet.modeling_univnet.UnivNetKernelPredictor
from torch import nn import torch from .configuration_univnet import UnivNetConfig class UnivNetKernelPredictor(nn.Module): """ Implementation of the kernel predictor network which supplies the kernel and bias for the location variable convolutional layers (LVCs) in each UnivNet LVCBlock. Based on the KernelPredictor implementation in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7). Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. conv_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the location variable convolutional layer kernels (convolutional weight tensor). conv_layers (`int`, *optional*, defaults to 4): The number of location variable convolutional layers to output kernels and biases for. """ def __init__(self, config: UnivNetConfig, conv_kernel_size: int=3, conv_layers: int=4): super().__init__() self.conv_in_channels = config.model_hidden_channels self.conv_out_channels = 2 * config.model_hidden_channels self.conv_kernel_size = conv_kernel_size self.conv_layers = conv_layers self.kernel_channels = self.conv_in_channels * self.conv_out_channels * self.conv_kernel_size * self.conv_layers self.bias_channels = self.conv_out_channels * self.conv_layers self.resnet_in_channels = config.num_mel_bins self.resnet_hidden_channels = config.kernel_predictor_hidden_channels self.resnet_kernel_size = config.kernel_predictor_conv_size self.num_blocks = config.kernel_predictor_num_blocks self.leaky_relu_slope = config.leaky_relu_slope padding = (self.resnet_kernel_size - 1) // 2 self.input_conv = nn.Conv1d(self.resnet_in_channels, self.resnet_hidden_channels, 5, padding=2, bias=True) self.resblocks = nn.ModuleList([UnivNetKernelPredictorResidualBlock(config) for _ in range(self.num_blocks)]) self.kernel_conv = nn.Conv1d(self.resnet_hidden_channels, self.kernel_channels, self.resnet_kernel_size, padding=padding, bias=True) self.bias_conv = nn.Conv1d(self.resnet_hidden_channels, self.bias_channels, self.resnet_kernel_size, padding=padding, bias=True) def forward(self, spectrogram: torch.FloatTensor): """ Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels, seq_length). Args: spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`): Tensor containing the log-mel spectrograms. Returns: tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels, seq_length)`. 
""" batch_size, _, seq_length = spectrogram.shape hidden_states = self.input_conv(spectrogram) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) for resblock in self.resblocks: hidden_states = resblock(hidden_states) kernel_hidden_states = self.kernel_conv(hidden_states) bias_hidden_states = self.bias_conv(hidden_states) kernels = kernel_hidden_states.view(batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length).contiguous() biases = bias_hidden_states.view(batch_size, self.conv_layers, self.conv_out_channels, seq_length).contiguous() return (kernels, biases) def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, 'weight_norm'): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.input_conv) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.kernel_conv) weight_norm(self.bias_conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.input_conv) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.kernel_conv) nn.utils.remove_weight_norm(self.bias_conv)
class UnivNetKernelPredictor(nn.Module): ''' Implementation of the kernel predictor network which supplies the kernel and bias for the location variable convolutional layers (LVCs) in each UnivNet LVCBlock. Based on the KernelPredictor implementation in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7). Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. conv_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the location variable convolutional layer kernels (convolutional weight tensor). conv_layers (`int`, *optional*, defaults to 4): The number of location variable convolutional layers to output kernels and biases for. ''' def __init__(self, config: UnivNetConfig, conv_kernel_size: int=3, conv_layers: int=4): pass def forward(self, spectrogram: torch.FloatTensor): ''' Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels, seq_length). Args: spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`): Tensor containing the log-mel spectrograms. Returns: tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels, seq_length)`. ''' pass def apply_weight_norm(self): pass def remove_weight_norm(self): pass
5
2
25
4
17
4
2
0.41
1
5
2
0
4
15
4
14
118
22
68
36
58
28
45
31
40
3
1
1
8
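The kernel predictor's two output convolutions simply emit the flattened per-layer LVC kernels and biases, so their channel counts are products of the sizes set in `__init__` above. A small arithmetic sketch with illustrative numbers (not the released UnivNet defaults):

```python
# Illustrative sizes; the real values come from UnivNetConfig.
conv_in_channels = 32                      # config.model_hidden_channels
conv_out_channels = 2 * conv_in_channels   # doubled for the gated activation in the LVC block
conv_kernel_size = 3
conv_layers = 4

kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers
bias_channels = conv_out_channels * conv_layers

# forward() views the (batch, kernel_channels, seq_length) output as
# (batch, conv_layers, conv_in_channels, conv_out_channels, conv_kernel_size, seq_length).
print(kernel_channels, bias_channels)  # 24576 256
```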
5,791
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/modeling_univnet.py
transformers.models.univnet.modeling_univnet.UnivNetKernelPredictorResidualBlock
from .configuration_univnet import UnivNetConfig
from torch import nn
import torch

class UnivNetKernelPredictorResidualBlock(nn.Module):
    """
    Implementation of the residual block for the kernel predictor network inside each location variable convolution
    block (LVCBlock).

    Parameters:
        config: (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
    """

    def __init__(self, config: UnivNetConfig):
        super().__init__()
        self.channels = config.model_in_channels
        self.kernel_size = config.kernel_predictor_conv_size
        self.dropout_prob = config.kernel_predictor_dropout
        self.leaky_relu_slope = config.leaky_relu_slope
        padding = (self.kernel_size - 1) // 2
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)
        self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)

    def forward(self, hidden_states: torch.FloatTensor):
        residual = hidden_states
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv1(hidden_states)
        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
        hidden_states = self.conv2(hidden_states)
        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
        return hidden_states + residual

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, 'weight_norm'):
            weight_norm = nn.utils.parametrizations.weight_norm
        weight_norm(self.conv1)
        weight_norm(self.conv2)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv1)
        nn.utils.remove_weight_norm(self.conv2)
class UnivNetKernelPredictorResidualBlock(nn.Module):
    '''
    Implementation of the residual block for the kernel predictor network inside each location variable convolution
    block (LVCBlock).

    Parameters:
        config: (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
    '''

    def __init__(self, config: UnivNetConfig):
        pass

    def forward(self, hidden_states: torch.FloatTensor):
        pass

    def apply_weight_norm(self):
        pass

    def remove_weight_norm(self):
        pass
5
1
9
1
8
0
1
0.26
1
2
1
0
4
7
4
14
47
8
31
18
23
8
28
15
23
2
1
1
5
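Each UnivNet module above carries the same `apply_weight_norm` fallback: use `nn.utils.parametrizations.weight_norm` when available and the legacy `nn.utils.weight_norm` otherwise. A self-contained sketch of that pattern on a plain convolution:

```python
import torch
from torch import nn

conv = nn.Conv1d(4, 4, kernel_size=3, padding=1)

# Prefer the parametrization-based API (newer PyTorch); fall back to the deprecated helper.
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
    weight_norm = nn.utils.parametrizations.weight_norm

conv = weight_norm(conv)
print(conv(torch.randn(1, 4, 16)).shape)  # torch.Size([1, 4, 16])
```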
5,792
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/modeling_univnet.py
transformers.models.univnet.modeling_univnet.UnivNetLvcBlock
from .configuration_univnet import UnivNetConfig import torch from torch import nn class UnivNetLvcBlock(nn.Module): """ Implementation of the location variable convolution (LVC) residual block of the UnivNet residual block. Includes a `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers. Based on LVCBlock in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98) Parameters: config (`UnivNetConfig`): Config for the `UnivNetModel` model. layer_id (`int`): An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and `len(config.resblock_stride_sizes) - 1)` inclusive. lvc_hop_size (`int`, *optional*, defaults to 256): The hop size for the location variable convolutional layers. """ def __init__(self, config: UnivNetConfig, layer_id: int, lvc_hop_size: int=256): super().__init__() self.hidden_channels = config.model_hidden_channels self.kernel_size = config.resblock_kernel_sizes[layer_id] self.stride = config.resblock_stride_sizes[layer_id] self.dilations = config.resblock_dilation_sizes[layer_id] self.cond_hop_length = lvc_hop_size self.leaky_relu_slope = config.leaky_relu_slope self.num_blocks = len(self.dilations) self.convt_pre = nn.ConvTranspose1d(self.hidden_channels, self.hidden_channels, 2 * self.stride, stride=self.stride, padding=self.stride // 2 + self.stride % 2, output_padding=self.stride % 2) self.kernel_predictor = UnivNetKernelPredictor(config, self.kernel_size, self.num_blocks) self.resblocks = nn.ModuleList([UnivNetLvcResidualBlock(config, self.kernel_size, self.dilations[i]) for i in range(self.num_blocks)]) def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor): hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.convt_pre(hidden_states) kernels, biases = self.kernel_predictor(spectrogram) for i, resblock in enumerate(self.resblocks): kernel = kernels[:, i, :, :, :, :] bias = biases[:, i, :, :] hidden_states = resblock(hidden_states, kernel, bias, hop_size=self.cond_hop_length) return hidden_states def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, 'weight_norm'): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.convt_pre) self.kernel_predictor.apply_weight_norm() for layer in self.resblocks: layer.apply_weight_norm() def remove_weight_norm(self): nn.utils.remove_weight_norm(self.convt_pre) self.kernel_predictor.remove_weight_norm() for layer in self.resblocks: layer.remove_weight_norm()
class UnivNetLvcBlock(nn.Module): ''' Implementation of the location variable convolution (LVC) residual block of the UnivNet residual block. Includes a `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers. Based on LVCBlock in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98) Parameters: config (`UnivNetConfig`): Config for the `UnivNetModel` model. layer_id (`int`): An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and `len(config.resblock_stride_sizes) - 1)` inclusive. lvc_hop_size (`int`, *optional*, defaults to 256): The hop size for the location variable convolutional layers. ''' def __init__(self, config: UnivNetConfig, layer_id: int, lvc_hop_size: int=256): pass def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor): pass def apply_weight_norm(self): pass def remove_weight_norm(self): pass
5
1
14
2
12
1
2
0.33
1
7
3
0
4
10
4
14
78
13
49
27
39
16
35
22
30
3
1
1
8
5,793
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/modeling_univnet.py
transformers.models.univnet.modeling_univnet.UnivNetLvcResidualBlock
from torch import nn from .configuration_univnet import UnivNetConfig import torch class UnivNetLvcResidualBlock(nn.Module): """ Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network. Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. kernel_size (`int`): The kernel size for the dilated 1D convolutional layer. dilation (`int`): The dilation for the dilated 1D convolutional layer. """ def __init__(self, config: UnivNetConfig, kernel_size: int, dilation: int): super().__init__() self.hidden_channels = config.model_hidden_channels self.kernel_size = kernel_size self.dilation = dilation self.leaky_relu_slope = config.leaky_relu_slope padding = self.dilation * (self.kernel_size - 1) // 2 self.conv = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, padding=padding, dilation=self.dilation) def forward(self, hidden_states, kernel, bias, hop_size=256): residual = hidden_states hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.conv(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.location_variable_convolution(hidden_states, kernel, bias, hop_size=hop_size) hidden_states = torch.sigmoid(hidden_states[:, :self.hidden_channels, :]) * torch.tanh(hidden_states[:, self.hidden_channels:, :]) hidden_states = residual + hidden_states return hidden_states def location_variable_convolution(self, hidden_states: torch.FloatTensor, kernel: torch.FloatTensor, bias: torch.FloatTensor, dilation: int=1, hop_size: int=256): """ Performs location-variable convolution operation on the input sequence (hidden_states) using the local convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform Generation](https://huggingface.co/papers/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao. Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`): The input sequence of shape (batch, in_channels, in_length). kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`): The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length). bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`): The bias for the local convolution of shape (batch, out_channels, kernel_length). dilation (`int`, *optional*, defaults to 1): The dilation of convolution. hop_size (`int`, *optional*, defaults to 256): The hop_size of the conditioning sequence. Returns: `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size, out_channels, in_length). """ batch, _, in_length = hidden_states.shape batch, _, out_channels, kernel_size, kernel_length = kernel.shape if in_length != kernel_length * hop_size: raise ValueError(f'Dim 2 of `hidden_states` should be {kernel_length * hop_size}) but got {in_length}. 
Please check `hidden_states` or `kernel` and `hop_size` to make sure they are correct.') padding = dilation * int((kernel_size - 1) / 2) hidden_states = nn.functional.pad(hidden_states, (padding, padding), 'constant', 0) hidden_states = hidden_states.unfold(2, hop_size + 2 * padding, hop_size) if hop_size < dilation: hidden_states = nn.functional.pad(hidden_states, (0, dilation), 'constant', 0) hidden_states = hidden_states.unfold(3, dilation, dilation) hidden_states = hidden_states[:, :, :, :, :hop_size] hidden_states = hidden_states.transpose(3, 4) hidden_states = hidden_states.unfold(4, kernel_size, 1) output_hidden_states = torch.einsum('bildsk,biokl->bolsd', hidden_states, kernel) output_hidden_states = output_hidden_states.to(memory_format=torch.channels_last_3d) bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d) output_hidden_states = output_hidden_states + bias output_hidden_states = output_hidden_states.contiguous().view(batch, out_channels, -1) return output_hidden_states def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, 'weight_norm'): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv)
class UnivNetLvcResidualBlock(nn.Module): ''' Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network. Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. kernel_size (`int`): The kernel size for the dilated 1D convolutional layer. dilation (`int`): The dilation for the dilated 1D convolutional layer. ''' def __init__(self, config: UnivNetConfig, kernel_size: int, dilation: int): pass def forward(self, hidden_states, kernel, bias, hop_size=256): pass def location_variable_convolution(self, hidden_states: torch.FloatTensor, kernel: torch.FloatTensor, bias: torch.FloatTensor, dilation: int=1, hop_size: int=256): ''' Performs location-variable convolution operation on the input sequence (hidden_states) using the local convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform Generation](https://huggingface.co/papers/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao. Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`): The input sequence of shape (batch, in_channels, in_length). kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`): The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length). bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`): The bias for the local convolution of shape (batch, out_channels, kernel_length). dilation (`int`, *optional*, defaults to 1): The dilation of convolution. hop_size (`int`, *optional*, defaults to 256): The hop_size of the conditioning sequence. Returns: `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size, out_channels, in_length). ''' pass def apply_weight_norm(self): pass def remove_weight_norm(self): pass
6
2
21
2
13
6
2
0.57
1
4
1
0
5
5
5
15
125
18
68
30
50
39
45
18
39
3
1
1
8
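`location_variable_convolution` above enforces a strict shape contract between the hidden states, the predicted kernels, and `hop_size`. A sketch that only illustrates that contract with made-up sizes (it builds the tensors but leaves the actual unfold/einsum to the method shown above):

```python
import torch

batch, in_channels, out_channels = 1, 8, 16
kernel_size, kernel_length, hop_size = 3, 10, 256

# Required by the method: dim 2 of hidden_states must equal kernel_length * hop_size.
in_length = kernel_length * hop_size

hidden_states = torch.randn(batch, in_channels, in_length)
kernel = torch.randn(batch, in_channels, out_channels, kernel_size, kernel_length)
bias = torch.randn(batch, out_channels, kernel_length)

# The einsum 'bildsk,biokl->bolsd' plus the final view produce (batch, out_channels, in_length).
expected_output_shape = (batch, out_channels, in_length)
print(expected_output_shape)  # (1, 16, 2560)
```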
5,794
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/univnet/modeling_univnet.py
transformers.models.univnet.modeling_univnet.UnivNetModel
from typing import Optional, Union import torch from .configuration_univnet import UnivNetConfig from torch import nn from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging @auto_docstring class UnivNetModel(PreTrainedModel): config: UnivNetConfig main_input_name = 'input_features' def __init__(self, config: UnivNetConfig): super().__init__(config) self.num_kernels = len(config.resblock_kernel_sizes) self.leaky_relu_slope = config.leaky_relu_slope self.conv_pre = nn.Conv1d(config.model_in_channels, config.model_hidden_channels, kernel_size=7, stride=1, padding=3, padding_mode='reflect') num_layers = len(config.resblock_stride_sizes) hop_length = 1 hop_lengths = [] for stride in config.resblock_stride_sizes: hop_length = hop_length * stride hop_lengths.append(hop_length) self.resblocks = nn.ModuleList([UnivNetLvcBlock(config, layer_id=i, lvc_hop_size=hop_lengths[i]) for i in range(num_layers)]) self.conv_post = nn.Conv1d(config.model_hidden_channels, 1, 7, padding=3, padding_mode='reflect') self.post_init() @auto_docstring def forward(self, input_features: torch.FloatTensor, noise_sequence: Optional[torch.FloatTensor]=None, padding_mask: Optional[torch.FloatTensor]=None, generator: Optional[torch.Generator]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], UnivNetModelOutput]: """ noise_sequence (`torch.FloatTensor`, *optional*): Tensor containing a noise sequence of standard Gaussian noise. Can be batched and of shape `(batch_size, sequence_length, config.model_in_channels)`, or un-batched and of shape (sequence_length, config.model_in_channels)`. If not supplied, will be randomly generated. padding_mask (`torch.BoolTensor`, *optional*): Mask indicating which parts of each sequence are padded. Mask values are selected in `[0, 1]`: - 1 for tokens that are **not masked** - 0 for tokens that are **masked** The mask can be batched and of shape `(batch_size, sequence_length)` or un-batched and of shape `(sequence_length,)`. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. return_dict: Whether to return a [`~utils.ModelOutput`] subclass instead of a plain tuple. Example: ```python >>> from transformers import UnivNetFeatureExtractor, UnivNetModel >>> from datasets import load_dataset, Audio >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev") >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> # Resample the audio to the feature extractor's sampling rate. >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate)) >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... 
) >>> audio = model(**inputs).waveforms >>> list(audio.shape) [1, 140288] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict spectrogram_batched = input_features.dim() == 3 if not spectrogram_batched: input_features = input_features.unsqueeze(0) spectrogram_batch_size, spectrogram_length, _ = input_features.shape if noise_sequence is not None: noise_sequence_batched = noise_sequence.dim() == 3 if not noise_sequence_batched: noise_sequence = noise_sequence.unsqueeze(0) else: noise_sequence_shape = (spectrogram_batch_size, spectrogram_length, self.config.model_in_channels) noise_sequence = torch.randn(noise_sequence_shape, generator=generator, dtype=input_features.dtype, device=input_features.device) noise_sequence_batch_size = noise_sequence.shape[0] if spectrogram_batch_size > 1 and noise_sequence_batch_size == 1: noise_sequence = noise_sequence.repeat(spectrogram_batch_size, 1, 1) elif noise_sequence_batch_size > 1 and spectrogram_batch_size == 1: input_features = input_features.repeat(noise_sequence_batch_size, 1, 1) if noise_sequence_batch_size != spectrogram_batch_size: raise ValueError(f'The batch size of `noise_sequence` is {noise_sequence_batch_size} and the batch size of `input_features` is {spectrogram_batch_size}, but the two are expected to be equal.') if padding_mask is not None: if padding_mask.dim() == 1: padding_mask = padding_mask.unsqueeze(0) padding_mask_batch_size = padding_mask.shape[0] if padding_mask_batch_size != spectrogram_batch_size: raise ValueError(f'The batch size of `padding_mask` is {padding_mask_batch_size} and the batch size of `input_features` is {spectrogram_batch_size}, but the two are expected to be equal.') hidden_states = noise_sequence.transpose(2, 1) input_features = input_features.transpose(2, 1) hidden_states = self.conv_pre(hidden_states) for resblock in self.resblocks: hidden_states = resblock(hidden_states, input_features) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) waveform = hidden_states.squeeze(1) waveform_lengths = None if padding_mask is not None: waveform_lengths = torch.sum(padding_mask, dim=1) if not return_dict: outputs = (waveform, waveform_lengths) return outputs return UnivNetModelOutput(waveforms=waveform, waveform_lengths=waveform_lengths) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, 'weight_norm'): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv_pre) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv_pre) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post)
@auto_docstring class UnivNetModel(PreTrainedModel): def __init__(self, config: UnivNetConfig): pass @auto_docstring def forward(self, input_features: torch.FloatTensor, noise_sequence: Optional[torch.FloatTensor]=None, padding_mask: Optional[torch.FloatTensor]=None, generator: Optional[torch.Generator]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], UnivNetModelOutput]: ''' noise_sequence (`torch.FloatTensor`, *optional*): Tensor containing a noise sequence of standard Gaussian noise. Can be batched and of shape `(batch_size, sequence_length, config.model_in_channels)`, or un-batched and of shape (sequence_length, config.model_in_channels)`. If not supplied, will be randomly generated. padding_mask (`torch.BoolTensor`, *optional*): Mask indicating which parts of each sequence are padded. Mask values are selected in `[0, 1]`: - 1 for tokens that are **not masked** - 0 for tokens that are **masked** The mask can be batched and of shape `(batch_size, sequence_length)` or un-batched and of shape `(sequence_length,)`. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. return_dict: Whether to return a [`~utils.ModelOutput`] subclass instead of a plain tuple. Example: ```python >>> from transformers import UnivNetFeatureExtractor, UnivNetModel >>> from datasets import load_dataset, Audio >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev") >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> # Resample the audio to the feature extractor's sampling rate. >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate)) >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... ) >>> audio = model(**inputs).waveforms >>> list(audio.shape) [1, 140288] ``` ''' pass def _init_weights(self, module): '''Initialize the weights.''' pass def apply_weight_norm(self): pass def remove_weight_norm(self): pass
8
2
33
5
22
6
5
0.27
1
7
3
0
5
5
5
5
173
29
113
39
98
31
75
31
69
14
1
2
24
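`UnivNetModel` only returns `waveform_lengths` when a padding mask is provided, and the feature extractor's `batch_decode` uses those lengths to strip the padding again. A self-contained sketch with synthetic tensors standing in for the model output (the lengths here are made up):

```python
import torch
from transformers import UnivNetFeatureExtractor

feature_extractor = UnivNetFeatureExtractor()  # all-default construction

# Pretend the vocoder produced two padded waveforms of length 1024 whose true lengths were 1000 and 768.
waveforms = torch.randn(2, 1024)
waveform_lengths = torch.tensor([1000, 768])

audio = feature_extractor.batch_decode(waveforms, waveform_lengths=waveform_lengths)
print([a.shape for a in audio])  # expected: [(1000,), (768,)]
```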
5,795
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/upernet/configuration_upernet.py
transformers.models.upernet.configuration_upernet.UperNetConfig
from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto.configuration_auto import CONFIG_MAPPING from ...configuration_utils import PretrainedConfig class UperNetConfig(PretrainedConfig): """ This is the configuration class to store the configuration of an [`UperNetForSemanticSegmentation`]. It is used to instantiate an UperNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UperNet [openmmlab/upernet-convnext-tiny](https://huggingface.co/openmmlab/upernet-convnext-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_size (`int`, *optional*, defaults to 512): The number of hidden units in the convolutional layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function. 
Examples: ```python >>> from transformers import UperNetConfig, UperNetForSemanticSegmentation >>> # Initializing a configuration >>> configuration = UperNetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = UperNetForSemanticSegmentation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'upernet' def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=None, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs): super().__init__(**kwargs) if backbone_config is None and backbone is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4']) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get('model_type') config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.hidden_size = hidden_size self.initializer_range = initializer_range self.pool_scales = pool_scales self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_in_channels = auxiliary_in_channels self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.loss_ignore_index = loss_ignore_index @property def sub_configs(self): return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
class UperNetConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of an [`UperNetForSemanticSegmentation`]. It is used to instantiate an UperNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UperNet [openmmlab/upernet-convnext-tiny](https://huggingface.co/openmmlab/upernet-convnext-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_size (`int`, *optional*, defaults to 512): The number of hidden units in the convolutional layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function. Examples: ```python >>> from transformers import UperNetConfig, UperNetForSemanticSegmentation >>> # Initializing a configuration >>> configuration = UperNetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = UperNetForSemanticSegmentation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=None, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs): pass @property def sub_configs(self): pass
4
1
51
2
49
0
3
0.98
1
2
0
0
1
15
1
1
112
11
51
38
31
50
26
20
24
3
1
1
3
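Per `__init__` above, `backbone_config` may be a nested config object or a plain dict whose `model_type` key is resolved through `CONFIG_MAPPING`. A hedged sketch of swapping the default ResNet backbone for ConvNeXt (the `out_features` stage names follow the usual transformers backbone convention and are an assumption here):

```python
from transformers import UperNetConfig, UperNetForSemanticSegmentation

# Default: with no backbone arguments a ResNet backbone config is created automatically.
default_config = UperNetConfig(num_labels=150)

# Dict form: `model_type` picks the backbone config class via CONFIG_MAPPING.
convnext_config = UperNetConfig(
    backbone_config={"model_type": "convnext", "out_features": ["stage1", "stage2", "stage3", "stage4"]},
    num_labels=150,
)

# Randomly initialized model using the ConvNeXt backbone.
model = UperNetForSemanticSegmentation(convnext_config)
```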
5,796
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/upernet/modeling_upernet.py
transformers.models.upernet.modeling_upernet.UperNetConvModule
import torch
from torch import nn
from typing import Optional, Union

class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, tuple[int, int]], padding: Union[int, tuple[int, int], str]=0, bias: bool=False, dilation: Union[int, tuple[int, int]]=1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetConvModule(nn.Module):
    '''
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
    '''

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, tuple[int, int]], padding: Union[int, tuple[int, int], str]=0, bias: bool=False, dilation: Union[int, tuple[int, int]]=1) -> None:
        pass

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        pass
3
1
13
1
13
0
1
0.15
1
5
0
0
2
3
2
12
33
3
26
15
15
4
11
7
8
1
1
0
2
5,797
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/upernet/modeling_upernet.py
transformers.models.upernet.modeling_upernet.UperNetFCNHead
from torch import nn from typing import Optional, Union import torch class UperNetFCNHead(nn.Module): """ Fully Convolution Networks for Semantic Segmentation. This head is the implementation of [FCNNet](https://huggingface.co/papers/1411.4038>). Args: config: Configuration. in_channels (int): Number of input channels. kernel_size (int): The kernel size for convs in the head. Default: 3. dilation (int): The dilation rate for convs in the head. Default: 1. """ def __init__(self, config, in_channels, in_index: int=2, kernel_size: int=3, dilation: Union[int, tuple[int, int]]=1) -> None: super().__init__() self.config = config self.in_channels = in_channels[in_index] if config.auxiliary_in_channels is None else config.auxiliary_in_channels self.channels = config.auxiliary_channels self.num_convs = config.auxiliary_num_convs self.concat_input = config.auxiliary_concat_input self.in_index = in_index conv_padding = kernel_size // 2 * dilation convs = [] convs.append(UperNetConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)) for i in range(self.num_convs - 1): convs.append(UperNetConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)) if self.num_convs == 0: self.convs = nn.Identity() else: self.convs = nn.Sequential(*convs) if self.concat_input: self.conv_cat = UperNetConvModule(self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2) self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = encoder_hidden_states[self.in_index] output = self.convs(hidden_states) if self.concat_input: output = self.conv_cat(torch.cat([hidden_states, output], dim=1)) output = self.classifier(output) return output
class UperNetFCNHead(nn.Module): ''' Fully Convolution Networks for Semantic Segmentation. This head is the implementation of [FCNNet](https://huggingface.co/papers/1411.4038>). Args: config: Configuration. in_channels (int): Number of input channels. kernel_size (int): The kernel size for convs in the head. Default: 3. dilation (int): The dilation rate for convs in the head. Default: 1. ''' def __init__(self, config, in_channels, in_index: int=2, kernel_size: int=3, dilation: Union[int, tuple[int, int]]=1) -> None: pass def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: pass
3
1
13
1
12
0
3
0.3
1
5
1
0
4
9
4
14
69
8
47
21
40
14
34
19
29
4
1
2
10
5,798
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/upernet/modeling_upernet.py
transformers.models.upernet.modeling_upernet.UperNetForSemanticSegmentation
from ...modeling_outputs import SemanticSegmenterOutput import torch from ...utils.backbone_utils import load_backbone from typing import Optional, Union from torch.nn import CrossEntropyLoss from torch import nn from ...utils import auto_docstring @auto_docstring(custom_intro='\n UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.\n ') class UperNetForSemanticSegmentation(UperNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.backbone = load_backbone(config) self.decode_head = UperNetHead(config, in_channels=self.backbone.channels) self.auxiliary_head = UperNetFCNHead(config, in_channels=self.backbone.channels) if config.use_auxiliary_head else None self.post_init() @auto_docstring def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Examples: ```python >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation >>> from PIL import Image >>> from huggingface_hub import hf_hub_download >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny") >>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset" ... 
) >>> image = Image.open(filepath).convert("RGB") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits # shape (batch_size, num_labels, height, width) >>> list(logits.shape) [1, 150, 512, 512] ```""" if labels is not None and self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one') return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions) features = outputs.feature_maps logits = self.decode_head(features) logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(features) auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index) loss = loss_fct(logits, labels) if auxiliary_logits is not None: auxiliary_loss = loss_fct(auxiliary_logits, labels) loss += self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.\n ') class UperNetForSemanticSegmentation(UperNetPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Examples: ```python >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation >>> from PIL import Image >>> from huggingface_hub import hf_hub_download >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny") >>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset" ... ) >>> image = Image.open(filepath).convert("RGB") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits # shape (batch_size, num_labels, height, width) >>> list(logits.shape) [1, 150, 512, 512] ```''' pass
5
1
47
9
26
13
7
0.45
1
8
3
0
2
3
2
4
98
18
55
22
43
25
34
14
31
11
2
2
13
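Since `forward` above interpolates the logits back to the input resolution, the predicted segmentation map is just an argmax over the class dimension. A self-contained sketch with random logits standing in for `outputs.logits` from the docstring example:

```python
import torch

# Stand-in for outputs.logits: (batch_size, num_labels, height, width), e.g. 150 ADE20k classes.
logits = torch.randn(1, 150, 512, 512)

# One class index per pixel; no extra upsampling is needed because the model already
# interpolated the logits to the pixel_values resolution.
segmentation_map = logits.argmax(dim=1)
print(segmentation_map.shape)  # torch.Size([1, 512, 512])
```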
5,799
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/upernet/modeling_upernet.py
transformers.models.upernet.modeling_upernet.UperNetHead
import torch from torch import nn class UperNetHead(nn.Module): """ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of [UPerNet](https://huggingface.co/papers/1807.10221). """ def __init__(self, config, in_channels): super().__init__() self.config = config self.pool_scales = config.pool_scales self.in_channels = in_channels self.channels = config.hidden_size self.align_corners = False self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) self.psp_modules = UperNetPyramidPoolingModule(self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners) self.bottleneck = UperNetConvModule(self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1) self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for in_channels in self.in_channels[:-1]: l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1) fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) self.fpn_bottleneck = UperNetConvModule(len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1) def psp_forward(self, inputs): x = inputs[-1] psp_outs = [x] psp_outs.extend(self.psp_modules(x)) psp_outs = torch.cat(psp_outs, dim=1) output = self.bottleneck(psp_outs) return output def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)] laterals.append(self.psp_forward(encoder_hidden_states)) used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners) fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)] fpn_outs.append(laterals[-1]) for i in range(used_backbone_levels - 1, 0, -1): fpn_outs[i] = nn.functional.interpolate(fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) fpn_outs = torch.cat(fpn_outs, dim=1) output = self.fpn_bottleneck(fpn_outs) output = self.classifier(output) return output
class UperNetHead(nn.Module): ''' Unified Perceptual Parsing for Scene Understanding. This head is the implementation of [UPerNet](https://huggingface.co/papers/1807.10221). ''' def __init__(self, config, in_channels): pass def psp_forward(self, inputs): pass def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: pass
4
1
16
2
13
2
2
0.18
1
6
2
0
5
11
5
15
91
14
67
27
61
12
48
27
42
3
1
2
10
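The top-down pathway in `UperNetHead.forward` above repeatedly upsamples the coarser lateral map and adds it to the next finer one. A self-contained sketch of just that loop, with random feature maps standing in for the backbone laterals:

```python
import torch
from torch import nn

# Four lateral maps (finest to coarsest), all already projected to the same channel count (8 here).
laterals = [torch.randn(1, 8, size, size) for size in (64, 32, 16, 8)]

# Top-down fusion as in UperNetHead.forward: upsample the coarser map onto the finer grid and add.
for i in range(len(laterals) - 1, 0, -1):
    prev_shape = laterals[i - 1].shape[2:]
    laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
        laterals[i], size=prev_shape, mode="bilinear", align_corners=False
    )

print([tuple(l.shape) for l in laterals])  # spatial sizes unchanged; finer maps now carry coarser context
```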