## Schema

One row per class extracted from a source repository. For the string columns, Min and Max are string lengths; `k` and `M` abbreviate thousands and millions.

| Column | Dtype | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
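A minimal sketch of loading and filtering rows like the ones below with the `datasets` library; the dataset's actual Hub id is not shown in this preview, so `ORG/DATASET_NAME` is a placeholder:

```python
from datasets import load_dataset

# Placeholder id -- substitute the dataset's real Hub path.
ds = load_dataset("ORG/DATASET_NAME", split="train")

# e.g. keep only small, documented classes
small = ds.filter(lambda row: row["CountLineCode"] <= 50 and row["total_doc_str"] > 0)
print(small[0]["class_name"])
```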
## Sample rows

### Row 1,400: ConvNextV2DropPath

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2DropPath

`human_written_code`:

```python
from torch import nn
from typing import Optional
import torch

class ConvNextV2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f'p={self.drop_prob}'
```

`class_skeleton`:

```python
class ConvNextV2DropPath(nn.Module):
    '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass

    def extra_repr(self) -> str:
        pass
```

Metrics: total_program_units=4, total_doc_str=1, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=1, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=12, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=1, CountStmt=8, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
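The snippet above calls a module-level `drop_path` helper that is not part of this record. A minimal sketch of a standard stochastic-depth implementation matching that call signature (an assumption; the file's actual helper is not shown here):

```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity at inference time or when the rate is zero.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    return input.div(keep_prob) * random_tensor
```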
### Row 1,401: ConvNextV2Embeddings

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2Embeddings

`human_written_code`:

```python
import torch
from torch import nn

class ConvNextV2Embeddings(nn.Module):
    """This class is comparable to (and inspired by) the SwinEmbeddings class found in
    src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size)
        self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-06, data_format='channels_first')
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings
```

`class_skeleton`:

```python
class ConvNextV2Embeddings(nn.Module):
    '''This class is comparable to (and inspired by) the SwinEmbeddings class found in
    src/transformers/models/swin/modeling_swin.py.
    '''

    def __init__(self, config):
        pass

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.18, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=22, CountLineBlank=2, CountLineCode=17, CountLineCodeDecl=8, CountLineCodeExe=14, CountLineComment=3, CountStmt=13, CountStmtDecl=8, CountStmtExe=10, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
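Each record pairs the full class source with a `class_skeleton` in which method bodies are replaced by `pass` while signatures, decorators, and docstrings survive. A minimal sketch of how such a skeleton could be produced with Python's `ast` module (an illustration, not necessarily the pipeline the dataset authors used):

```python
import ast

class Skeletonizer(ast.NodeTransformer):
    def visit_FunctionDef(self, node: ast.FunctionDef) -> ast.FunctionDef:
        # Keep the docstring (if any), replace the rest of the body with `pass`.
        doc = ast.get_docstring(node)
        node.body = ([ast.Expr(ast.Constant(doc))] if doc else []) + [ast.Pass()]
        return node

source = (
    "class Greeter:\n"
    "    def greet(self, name: str) -> str:\n"
    "        \"\"\"Return a greeting.\"\"\"\n"
    "        return f'hello {name}'\n"
)
print(ast.unparse(Skeletonizer().visit(ast.parse(source))))
```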
### Row 1,402: ConvNextV2Encoder

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2Encoder

`human_written_code`:

```python
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from typing import Optional
from torch import nn
import torch

class ConvNextV2Encoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList()
        drop_path_rates = [x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu').split(config.depths)]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = ConvNextV2Stage(config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i])
            self.stages.append(stage)
            prev_chs = out_chs

    def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=False) -> BaseModelOutputWithNoAttention:
        all_hidden_states = [hidden_states] if output_hidden_states else None
        for layer_module in self.stages:
            hidden_states = layer_module(hidden_states)
            if all_hidden_states is not None:
                all_hidden_states.append(hidden_states)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
```

`class_skeleton`:

```python
class ConvNextV2Encoder(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=False) -> BaseModelOutputWithNoAttention:
        pass
```

Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=3, AvgCountLineCode=19, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=45, CountLineBlank=6, CountLineCode=39, CountLineCodeDecl=16, CountLineCodeExe=31, CountLineComment=0, CountStmt=22, CountStmtDecl=11, CountStmtExe=19, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=9
### Row 1,403: ConvNextV2ForImageClassification

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2ForImageClassification

`human_written_code`:

```python
from torch import nn
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from typing import Optional
from ...utils.generic import can_return_tuple
from ...utils import auto_docstring, logging
import torch

@auto_docstring(custom_intro='\n    ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ')
class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
    accepts_loss_kwargs = False

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.convnextv2 = ConvNextV2Model(config)
        if config.num_labels > 0:
            self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels)
        else:
            self.classifier = nn.Identity()
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> ImageClassifierOutputWithNoAttention:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square
            loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPoolingAndNoAttention = self.convnextv2(pixel_values, **kwargs)
        pooled_output = outputs.pooler_output
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss = self.loss_function(labels=labels, pooled_logits=logits, config=self.config)
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
```

`class_skeleton`:

```python
@auto_docstring(custom_intro='\n    ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ')
class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):

    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> ImageClassifierOutputWithNoAttention:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square
            loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        '''
        pass
```

Metrics: total_program_units=6, total_doc_str=1, AvgCountLine=33, AvgCountLineBlank=5, AvgCountLineCode=24, AvgCountLineComment=4, AvgCyclomatic=8, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=74, CountLineBlank=10, CountLineCode=56, CountLineCodeDecl=19, CountLineCodeExe=40, CountLineComment=8, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=13, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=15
### Row 1,404: ConvNextV2GRN

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2GRN

`human_written_code`:

```python
from torch import nn
import torch

class ConvNextV2GRN(nn.Module):
    """GRN (Global Response Normalization) layer"""

    def __init__(self, dim: int):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        global_features = torch.linalg.vector_norm(hidden_states, ord=2, dim=(1, 2), keepdim=True)
        norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-06)
        hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
        return hidden_states
```

`class_skeleton`:

```python
class ConvNextV2GRN(nn.Module):
    '''GRN (Global Response Normalization) layer'''

    def __init__(self, dim: int):
        pass

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.2, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=15, CountLineBlank=3, CountLineCode=10, CountLineCodeDecl=7, CountLineCodeExe=7, CountLineComment=2, CountStmt=10, CountStmtDecl=7, CountStmtExe=7, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
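The schema does not define how CommentToCodeRatio is computed, but the rows are consistent with CountLineComment / CountLineCode: for the ConvNextV2GRN row above, 2 / 10 = 0.2, and for the ConvNextV2DropPath row, 1 / 8 ≈ 0.13. This is an inference from the sample values, not a documented formula.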
### Row 1,405: ConvNextV2Layer

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2Layer

`human_written_code`:

```python
import torch
from torch import nn
from ...activations import ACT2FN

class ConvNextV2Layer(nn.Module):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, H, W)
    (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextV2Config`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-06)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = ACT2FN[config.hidden_act]
        self.grn = ConvNextV2GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        residual = features
        features = self.dwconv(features)
        features = features.permute(0, 2, 3, 1)
        features = self.layernorm(features)
        features = self.pwconv1(features)
        features = self.act(features)
        features = self.grn(features)
        features = self.pwconv2(features)
        features = features.permute(0, 3, 1, 2)
        features = residual + self.drop_path(features)
        return features
```

`class_skeleton`:

```python
class ConvNextV2Layer(nn.Module):
    '''This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, H, W)
    (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextV2Config`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    '''

    def __init__(self, config, dim, drop_path=0):
        pass

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=13, AvgCountLineBlank=1, AvgCountLineCode=11, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.59, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=41, CountLineBlank=6, CountLineCode=22, CountLineCodeDecl=11, CountLineCodeExe=19, CountLineComment=13, CountStmt=22, CountStmtDecl=11, CountStmtExe=19, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
### Row 1,406: ConvNextV2LayerNorm

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2LayerNorm

`human_written_code`:

```python
import torch
from torch import nn

class ConvNextV2LayerNorm(nn.LayerNorm):
    """LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
    width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, *, eps=1e-06, data_format='channels_last', **kwargs):
        super().__init__(normalized_shape, eps=eps, **kwargs)
        if data_format not in ['channels_last', 'channels_first']:
            raise NotImplementedError(f'Unsupported data format: {data_format}')
        self.data_format = data_format

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
        """
        if self.data_format == 'channels_first':
            features = features.permute(0, 2, 3, 1)
            features = super().forward(features)
            features = features.permute(0, 3, 1, 2)
        else:
            features = super().forward(features)
        return features
```

`class_skeleton`:

```python
class ConvNextV2LayerNorm(nn.LayerNorm):
    '''LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
    width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    '''

    def __init__(self, normalized_shape, *, eps=1e-06, data_format='channels_last', **kwargs):
        pass

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        '''
        Args:
            features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
        '''
        pass
```

Metrics: total_program_units=3, total_doc_str=2, AvgCountLine=11, AvgCountLineBlank=0, AvgCountLineCode=11, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.18, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=28, CountLineBlank=2, CountLineCode=22, CountLineCodeDecl=11, CountLineCodeExe=19, CountLineComment=4, CountStmt=21, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
### Row 1,407: ConvNextV2Model

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2Model

`human_written_code`:

```python
from ...utils.generic import can_return_tuple
from ...utils import auto_docstring, logging
import torch
from torch import nn
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from typing import Optional

@auto_docstring
class ConvNextV2Model(ConvNextV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = ConvNextV2Embeddings(config)
        self.encoder = ConvNextV2Encoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention:
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values)
        encoder_outputs: BaseModelOutputWithNoAttention = self.encoder(embedding_output, output_hidden_states=output_hidden_states)
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
```

`class_skeleton`:

```python
@auto_docstring
class ConvNextV2Model(ConvNextV2PreTrainedModel):

    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention:
        pass
```

Metrics: total_program_units=6, total_doc_str=0, AvgCountLine=24, AvgCountLineBlank=5, AvgCountLineCode=17, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=57, CountLineBlank=11, CountLineCode=43, CountLineCodeDecl=17, CountLineCodeExe=27, CountLineComment=3, CountStmt=20, CountStmtDecl=11, CountStmtExe=17, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=6
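ConvNextV2Model derives its pooled output by averaging the final feature map over the spatial dimensions before the LayerNorm. A quick shape check with toy sizes (the 768 channels and 7×7 grid are illustrative, not taken from the config):

```python
import torch

last_hidden_state = torch.randn(2, 768, 7, 7)  # (batch, channels, height, width)
pooled = last_hidden_state.mean([-2, -1])      # average over H and W
assert pooled.shape == (2, 768)
```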
### Row 1,408: ConvNextV2PreTrainedModel

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2PreTrainedModel

`human_written_code`:

```python
from ...modeling_utils import PreTrainedModel
from .configuration_convnextv2 import ConvNextV2Config
from ...utils import auto_docstring, logging
from torch import nn

@auto_docstring
class ConvNextV2PreTrainedModel(PreTrainedModel):
    config: ConvNextV2Config
    base_model_prefix = 'convnextv2'
    main_input_name = 'pixel_values'
    _no_split_modules = ['ConvNextV2Layer']

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, ConvNextV2LayerNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, ConvNextV2GRN):
            module.weight.data.zero_()
            module.bias.data.zero_()
```

`class_skeleton`:

```python
@auto_docstring
class ConvNextV2PreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=11, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.54, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=3, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=22, CountLineBlank=2, CountLineCode=13, CountLineCodeDecl=6, CountLineCodeExe=11, CountLineComment=7, CountStmt=12, CountStmtDecl=6, CountStmtExe=10, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4
### Row 1,409: ConvNextV2Stage

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
- `class_name`: transformers.models.convnextv2.modeling_convnextv2.ConvNextV2Stage

`human_written_code`:

```python
import torch
from torch import nn

class ConvNextV2Stage(nn.Module):
    """ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextV2Config`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates(`list[float]`): Stochastic depth rates for each layer.
    """

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = nn.ModuleList([ConvNextV2LayerNorm(in_channels, eps=1e-06, data_format='channels_first'), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride)])
        else:
            self.downsampling_layer = nn.ModuleList()
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = nn.ModuleList([ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)])

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        for layer in self.downsampling_layer:
            features = layer(features)
        for layer in self.layers:
            features = layer(features)
        return features
```

`class_skeleton`:

```python
class ConvNextV2Stage(nn.Module):
    '''ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextV2Config`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates(`list[float]`): Stochastic depth rates for each layer.
    '''

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        pass

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=9, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.44, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=30, CountLineBlank=4, CountLineCode=18, CountLineCodeDecl=5, CountLineCodeExe=15, CountLineComment=8, CountStmt=12, CountStmtDecl=5, CountStmtExe=9, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
### Row 1,410: CpmTokenizer

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/cpm/tokenization_cpm.py
- `class_name`: transformers.models.cpm.tokenization_cpm.CpmTokenizer

`human_written_code`:

````python
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import unicodedata
import os
from ...utils import SPIECE_UNDERLINE, logging
import sentencepiece as spm
from typing import Any, Optional
from shutil import copyfile
from ...utils.import_utils import requires

@requires(backends=('sentencepiece',))
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba-RS segmentation tool. It is used in CPM models."""
    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
        """
        Construct a CPM tokenizer. Based on [Jieba-RS](https://pypi.org/project/rjieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `True`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        """
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import rjieba
        except ModuleNotFoundError as error:
            raise error.__class__('You need to install rjieba to use CpmTokenizer or CpmTokenizerFast. See https://pypi.org/project/rjieba/ for installation.')
        self.jieba = rjieba
        self.translator = str.maketrans(' \n', '▂▃')
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> list[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1, 1]
        return [0] * len(token_ids_0) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('▂', ' ').replace('▃', '\n')
        return text
````

`class_skeleton`:

````python
@requires(backends=('sentencepiece',))
class CpmTokenizer(PreTrainedTokenizer):
    '''Runs pre-tokenization with Jieba-RS segmentation tool. It is used in CPM models.'''

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
        '''
        Construct a CPM tokenizer. Based on [Jieba-RS](https://pypi.org/project/rjieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `True`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        '''
        pass

    @property
    def vocab_size(self):
        pass

    def get_vocab(self):
        pass

    def __getstate__(self):
        pass

    def __setstate__(self, d):
        pass

    def preprocess_text(self, inputs):
        pass

    def _tokenize(self, text: str) -> list[str]:
        '''Tokenize a string.'''
        pass

    def _convert_token_to_id(self, token):
        '''Converts a token (str) in an id using the vocab.'''
        pass

    def _convert_id_to_token(self, index):
        '''Converts an index (integer) in a token (str) using the vocab.'''
        pass

    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (strings for sub-words) in a single string.'''
        pass

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        '''
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        '''
        pass

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        '''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass

    def _decode(self, *args, **kwargs):
        pass
````

Metrics: total_program_units=18, total_doc_str=9, AvgCountLine=19, AvgCountLineBlank=2, AvgCountLineCode=10, AvgCountLineComment=7, AvgCyclomatic=2, CommentToCodeRatio=0.75, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=15, CountDeclInstanceVariable=10, CountDeclMethod=15, CountDeclMethodAll=104, CountLine=313, CountLineBlank=52, CountLineCode=149, CountLineCodeDecl=68, CountLineCodeExe=109, CountLineComment=112, CountStmt=101, CountStmtDecl=43, CountStmtExe=84, MaxCyclomatic=5, MaxInheritanceTree=3, MaxNesting=4, SumCyclomatic=34
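Both CPM tokenizers encode whitespace so that Jieba-segmented text survives SentencePiece: spaces become '▂' and newlines '▃' before encoding, and `_decode` reverses the mapping. A runnable round-trip of just that mapping:

```python
# Mirror of the translator built in CpmTokenizer.__init__ and undone in _decode.
translator = str.maketrans(' \n', '▂▃')
encoded = 'hello world\nbye'.translate(translator)
assert encoded == 'hello▂world▃bye'

decoded = encoded.replace(' ', '').replace('▂', ' ').replace('▃', '\n')
assert decoded == 'hello world\nbye'
```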
### Row 1,411: CpmTokenizerFast

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/cpm/tokenization_cpm_fast.py
- `class_name`: transformers.models.cpm.tokenization_cpm_fast.CpmTokenizerFast

`human_written_code`:

````python
import os
from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
from shutil import copyfile
from typing import Optional

class CpmTokenizerFast(PreTrainedTokenizerFast):
    """Runs pre-tokenization with Jieba-RS segmentation tool. It is used in CPM models."""

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        """
        Construct a CPM tokenizer. Based on [Jieba-RS](https://pypi.org/project/rjieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `True`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        """
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        try:
            import rjieba
        except ModuleNotFoundError as error:
            raise error.__class__('You need to install rjieba to use CpmTokenizer or CpmTokenizerFast. See https://pypi.org/project/rjieba/ for installation.')
        self.jieba = rjieba
        self.translator = str.maketrans(' \n', '▂▃')

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

    def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
        batch_text_or_text_pairs = [' '.join([x.translate(self.translator) for x in self.jieba.cut(text, False)]) for text in batch_text_or_text_pairs]
        return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('▂', ' ').replace('▃', '\n')
        return text
````

`class_skeleton`:

````python
class CpmTokenizerFast(PreTrainedTokenizerFast):
    '''Runs pre-tokenization with Jieba-RS segmentation tool. It is used in CPM models.'''

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        '''
        Construct a CPM tokenizer. Based on [Jieba-RS](https://pypi.org/project/rjieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `True`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        '''
        pass

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        '''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass

    def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
        pass

    def _decode(self, *args, **kwargs):
        pass
````

Metrics: total_program_units=7, total_doc_str=4, AvgCountLine=28, AvgCountLineBlank=4, AvgCountLineCode=13, AvgCountLineComment=11, AvgCyclomatic=2, CommentToCodeRatio=0.9, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=7, CountDeclMethod=7, CountDeclMethodAll=95, CountLine=209, CountLineBlank=32, CountLineCode=93, CountLineCodeDecl=44, CountLineCodeExe=63, CountLineComment=84, CountStmt=46, CountStmtDecl=22, CountStmtExe=37, MaxCyclomatic=5, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=16
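Both tokenizer variants share the XLNet-style layout built by `build_inputs_with_special_tokens`, mirrored here in plain Python with made-up ids (`9` for `<sep>`, `8` for `<cls>`):

```python
sep, cls = [9], [8]          # hypothetical ids for <sep> and <cls>
ids_a, ids_b = [1, 2, 3], [4, 5]

# single sequence: X <sep> <cls>
assert ids_a + sep + cls == [1, 2, 3, 9, 8]
# pair of sequences: A <sep> B <sep> <cls>
assert ids_a + sep + ids_b + sep + cls == [1, 2, 3, 9, 4, 5, 9, 8]
```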
### Row 1,412: CpmAntConfig

- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/configuration_cpmant.py
- `class_name`: transformers.models.cpmant.configuration_cpmant.CpmAntConfig

`human_written_code`:

````python
from ...configuration_utils import PretrainedConfig

class CpmAntConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`CpmAntModel`]. It is used to instantiate an
    CPMAnt model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CPMAnt
    [openbmb/cpm-ant-10b](https://huggingface.co/openbmb/cpm-ant-10b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30720):
            Vocabulary size of the CPMAnt model. Defines the number of different tokens that can be represented by the
            `input` passed when calling [`CpmAntModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads in the Transformer encoder.
        dim_head (`int`, *optional*, defaults to 128):
            Dimension of attention heads for each attention layer in the Transformer encoder.
        dim_ff (`int`, *optional*, defaults to 10240):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of layers of the Transformer encoder.
        dropout_p (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder.
        position_bias_num_buckets (`int`, *optional*, defaults to 512):
            The number of position_bias buckets.
        position_bias_max_distance (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        init_std (`float`, *optional*, defaults to 1.0):
            Initialize parameters with std = init_std.
        prompt_types (`int`, *optional*, defaults to 32):
            The type of prompt.
        prompt_length (`int`, *optional*, defaults to 32):
            The length of prompt.
        segment_types (`int`, *optional*, defaults to 32):
            The type of segment.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use cache.

    Example:

    ```python
    >>> from transformers import CpmAntModel, CpmAntConfig

    >>> # Initializing a CPMAnt cpm-ant-10b style configuration
    >>> configuration = CpmAntConfig()

    >>> # Initializing a model from the cpm-ant-10b style configuration
    >>> model = CpmAntModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'cpmant'

    def __init__(self, vocab_size: int=30720, hidden_size: int=4096, num_attention_heads: int=32, dim_head: int=128, dim_ff: int=10240, num_hidden_layers: int=48, dropout_p: int=0.0, position_bias_num_buckets: int=512, position_bias_max_distance: int=2048, eps: int=1e-06, init_std: float=1.0, prompt_types: int=32, prompt_length: int=32, segment_types: int=32, use_cache: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.prompt_types = prompt_types
        self.prompt_length = prompt_length
        self.segment_types = segment_types
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_hidden_layers = num_hidden_layers
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_max_distance = position_bias_max_distance
        self.dropout_p = dropout_p
        self.eps = eps
        self.use_cache = use_cache
        self.vocab_size = vocab_size
        self.init_std = init_std
````

`class_skeleton`:

````python
class CpmAntConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`CpmAntModel`]. It is used to instantiate an
    CPMAnt model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CPMAnt
    [openbmb/cpm-ant-10b](https://huggingface.co/openbmb/cpm-ant-10b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30720):
            Vocabulary size of the CPMAnt model. Defines the number of different tokens that can be represented by the
            `input` passed when calling [`CpmAntModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads in the Transformer encoder.
        dim_head (`int`, *optional*, defaults to 128):
            Dimension of attention heads for each attention layer in the Transformer encoder.
        dim_ff (`int`, *optional*, defaults to 10240):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of layers of the Transformer encoder.
        dropout_p (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder.
        position_bias_num_buckets (`int`, *optional*, defaults to 512):
            The number of position_bias buckets.
        position_bias_max_distance (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        init_std (`float`, *optional*, defaults to 1.0):
            Initialize parameters with std = init_std.
        prompt_types (`int`, *optional*, defaults to 32):
            The type of prompt.
        prompt_length (`int`, *optional*, defaults to 32):
            The length of prompt.
        segment_types (`int`, *optional*, defaults to 32):
            The type of segment.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use cache.

    Example:

    ```python
    >>> from transformers import CpmAntModel, CpmAntConfig

    >>> # Initializing a CPMAnt cpm-ant-10b style configuration
    >>> configuration = CpmAntConfig()

    >>> # Initializing a model from the cpm-ant-10b style configuration
    >>> model = CpmAntModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size: int=30720, hidden_size: int=4096, num_attention_heads: int=32, dim_head: int=128, dim_ff: int=10240, num_hidden_layers: int=48, dropout_p: int=0.0, position_bias_num_buckets: int=512, position_bias_max_distance: int=2048, eps: int=1e-06, init_std: float=1.0, prompt_types: int=32, prompt_length: int=32, segment_types: int=32, use_cache: bool=True, **kwargs):
        pass
````

Metrics: total_program_units=2, total_doc_str=1, AvgCountLine=35, AvgCountLineBlank=0, AvgCountLineCode=35, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.35, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=15, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=96, CountLineBlank=9, CountLineCode=37, CountLineCodeDecl=36, CountLineCodeExe=17, CountLineComment=50, CountStmt=19, CountStmtDecl=18, CountStmtExe=17, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
1,413
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntAttention
import torch from .configuration_cpmant import CpmAntConfig import math from ...cache_utils import Cache, DynamicCache from typing import Optional, Union from torch import nn import torch.nn.functional as F class CpmAntAttention(nn.Module): def __init__(self, config: CpmAntConfig, layer_idx=None): super().__init__() self.dim_model = config.hidden_size self.num_heads = config.num_attention_heads self.dim_head = config.dim_head self.layer_idx = layer_idx self.project_q = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False) self.project_k = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False) self.project_v = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False) self.attention_out = nn.Linear(self.num_heads * self.dim_head, self.dim_model, bias=False) self.softmax = torch.nn.Softmax(dim=-1) if config.dropout_p is not None: self.dropout = torch.nn.Dropout(p=config.dropout_p) else: self.dropout = None def forward(self, hidden_q: torch.Tensor, hidden_kv: torch.Tensor, attention_mask: torch.BoolTensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=False, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): """ Args: hidden_q (`torch.Tensor`): Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences. hidden_kv (`torch.Tensor` of shape `(batch, len_k, dim_model)`)): Tensor *key_value* and *query* of shape `(batch, len_k, dim_model)` attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Avoid invalid areas to participate in the calculation of self-attention. position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Provide positional information to self-attention block. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" batch_size = hidden_q.size(0) len_q = hidden_q.size(1) len_k = hidden_kv.size(1) query = self.project_q(hidden_q) key = self.project_k(hidden_kv) value = self.project_v(hidden_kv) query = query.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3) key = key.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3) value = value.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3) if past_key_values is not None: key, value = past_key_values.update(key, value, self.layer_idx, {'cache_position': cache_position}) len_k = key.size(-2) score = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(self.dim_head) score = score + position_bias score = torch.masked_fill(score, attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False), torch.scalar_tensor(float('-inf'), device=score.device, dtype=score.dtype)) score = self.softmax(score) score = torch.masked_fill(score, attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False), torch.scalar_tensor(0, device=score.device, dtype=score.dtype)) if output_attentions: attn_weights = score else: attn_weights = None if self.dropout is not None: score = self.dropout(score) score = torch.matmul(score, value) score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3) score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head) score = self.attention_out(score) return (score, attn_weights)
class CpmAntAttention(nn.Module): def __init__(self, config: CpmAntConfig, layer_idx=None): pass def forward(self, hidden_q: torch.Tensor, hidden_kv: torch.Tensor, attention_mask: torch.BoolTensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=False, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): ''' Args: hidden_q (`torch.Tensor`): Input of the transformer block (self-attention block). It can be the raw embedding of a batch of sequences. hidden_kv (`torch.Tensor` of shape `(batch, len_k, dim_model)`): Tensor *key_value* and *query* of shape `(batch, len_k, dim_model)` attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Avoid invalid areas to participate in the calculation of self-attention. position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Provide positional information to self-attention block. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). ''' pass
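A hedged, minimal sketch of the score computation in `CpmAntAttention.forward` above: scaled dot-product scores plus an additive position bias, with masked positions filled with `-inf` before the softmax and re-zeroed after it. All shapes below are illustrative assumptions, not values from a real checkpoint:

```python
import math

import torch

batch, heads, len_q, len_k, dim_head = 1, 2, 4, 4, 8
query = torch.randn(batch, heads, len_q, dim_head)
key = torch.randn(batch, heads, len_k, dim_head)
position_bias = torch.zeros(batch, heads, len_q, len_k)
attention_mask = torch.tril(torch.ones(len_q, len_k, dtype=torch.bool))  # causal mask

score = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(dim_head)
score = score + position_bias
score = score.masked_fill(~attention_mask, float("-inf"))
probs = torch.softmax(score, dim=-1)
# The module also re-zeroes masked entries after the softmax, so rows that
# are fully masked yield zeros instead of propagating NaNs.
probs = probs.masked_fill(~attention_mask, 0.0)
```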
3
1
50
8
32
10
4
0.31
1
5
1
0
2
9
2
12
102
17
65
29
53
20
46
20
43
5
1
1
7
1,414
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntDenseGatedACT
import torch from .configuration_cpmant import CpmAntConfig from torch import nn import torch.nn.functional as F class CpmAntDenseGatedACT(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.w_0 = nn.Linear(config.hidden_size, config.dim_ff, bias=False) self.w_1 = nn.Linear(config.hidden_size, config.dim_ff, bias=False) self.act = torch.nn.GELU() def forward(self, hidden_states: torch.Tensor): """Transform an input tensor from one feature space to another via a nonlinear operation Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) """ gate_score = self.act(self.w_0(hidden_states)) hidden_states = self.w_1(hidden_states) hidden_states = gate_score * hidden_states return hidden_states
class CpmAntDenseGatedACT(nn.Module): def __init__(self, config: CpmAntConfig): pass def forward(self, hidden_states: torch.Tensor): '''Transform an input tensor from one feature space to another via a nonlinear operation Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) ''' pass
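The gating pattern above is compact enough to show standalone. A minimal sketch (the dimensions are illustrative assumptions): one projection produces a GELU gate, a second produces the value, and the two are multiplied elementwise:

```python
import torch
from torch import nn

hidden_size, dim_ff = 16, 64  # illustrative sizes, not the CPMAnt defaults
w_0 = nn.Linear(hidden_size, dim_ff, bias=False)  # gate projection
w_1 = nn.Linear(hidden_size, dim_ff, bias=False)  # value projection
act = nn.GELU()

x = torch.randn(2, 5, hidden_size)  # (batch, seq_len, dim_in)
out = act(w_0(x)) * w_1(x)          # (batch, seq_len, dim_ff)
```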
3
1
8
1
5
2
1
0.36
1
3
1
0
2
3
2
12
18
3
11
7
8
4
11
7
8
1
1
0
2
1,415
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntEncoder
from .configuration_cpmant import CpmAntConfig import torch from typing import Optional, Union import torch.nn.functional as F from torch import nn from ...cache_utils import Cache, DynamicCache class CpmAntEncoder(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.num_layers = config.num_hidden_layers self.layers = nn.ModuleList([CpmAntTransformerBlock(config, layer_idx=i) for i in range(self.num_layers)]) self.output_layernorm = CpmAntLayerNorm(config) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): """ Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, attention_mask, position_bias, output_attentions=output_attentions, past_key_values=past_key_values, use_cache=use_cache) hidden_states, attn_weights = layer_outputs if output_attentions: all_self_attns += (attn_weights,) hidden_states = self.output_layernorm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) return (hidden_states, all_hidden_states, all_self_attns)
class CpmAntEncoder(nn.Module): def __init__(self, config: CpmAntConfig): pass def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): ''' Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). ''' pass
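One detail of the encoder loop above that is easy to miss: the hidden state is recorded *before* each block runs, and once more after the output layernorm, so `num_layers` blocks yield `num_layers + 1` collected states. A toy sketch of that bookkeeping (strings stand in for tensors):

```python
num_layers = 48  # config.num_hidden_layers default
collected = ()
hidden = "embedding output"
for i in range(num_layers):
    collected += (hidden,)        # pre-block state, recorded first
    hidden = f"block {i} output"  # stand-in for layer(hidden, ...)
hidden = "layernorm output"
collected += (hidden,)            # final normalized state
assert len(collected) == num_layers + 1
```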
3
1
31
3
19
9
6
0.46
1
8
3
0
2
3
2
12
63
6
39
21
27
18
23
12
20
10
1
2
11
1,416
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntFFNBlock
import torch.nn.functional as F from torch import nn import torch from .configuration_cpmant import CpmAntConfig class CpmAntFFNBlock(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.layernorm_before_ffn = CpmAntLayerNorm(config) self.ffn = CpmAntFeedForward(config) if config.dropout_p: self.dropout = torch.nn.Dropout(config.dropout_p) else: self.dropout = None def forward(self, hidden_states: torch.Tensor): """ Args: hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`): Hidden states before feed forward layer. """ ln_outputs = self.layernorm_before_ffn(hidden_states) outputs = self.ffn(ln_outputs) if self.dropout is not None: outputs = self.dropout(outputs) hidden_states = hidden_states + outputs return hidden_states
class CpmAntFFNBlock(nn.Module): def __init__(self, config: CpmAntConfig): pass def forward(self, hidden_states: torch.Tensor): ''' Args: hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`): Hidden states before feed forward layer. ''' pass
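This block follows the pre-LayerNorm residual pattern (normalize, transform, optionally drop out, then add back to the untouched input), shared with `CpmAntSelfAttentionBlock` later in this document. A minimal sketch with stand-in modules:

```python
import torch
from torch import nn

norm = nn.LayerNorm(16)  # stand-in for CpmAntLayerNorm
ffn = nn.Linear(16, 16)  # stand-in for CpmAntFeedForward

x = torch.randn(2, 5, 16)
out = x + ffn(norm(x))   # residual connection around the sub-layer
```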
3
1
12
0
9
3
2
0.26
1
5
3
0
2
3
2
12
25
1
19
11
13
5
15
8
12
2
1
1
4
1,417
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntFeedForward
from .configuration_cpmant import CpmAntConfig import torch from torch import nn import torch.nn.functional as F class CpmAntFeedForward(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.w_in = CpmAntDenseGatedACT(config) if config.dropout_p is not None: self.dropout = torch.nn.Dropout(config.dropout_p) else: self.dropout = None self.w_out = nn.Linear(config.dim_ff, config.hidden_size, bias=False) def forward(self, hidden_states: torch.Tensor): """ Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) """ hidden_states = self.w_in(hidden_states) if self.dropout is not None: hidden_states = self.dropout(hidden_states) hidden_states = self.w_out(hidden_states) return hidden_states
class CpmAntFeedForward(nn.Module): def __init__(self, config: CpmAntConfig): pass def forward(self, hidden_states: torch.Tensor): ''' Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) ''' pass
3
1
11
2
7
2
2
0.27
1
4
2
0
2
3
2
12
24
5
15
6
12
4
14
6
11
2
1
1
4
1,418
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntForCausalLM
from typing import Optional, Union from torch import nn import torch import torch.nn.functional as F from torch.nn import CrossEntropyLoss from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from .configuration_cpmant import CpmAntConfig from ...generation import GenerationMixin from ...cache_utils import Cache, DynamicCache from ...utils import auto_docstring, logging @auto_docstring(custom_intro='\n The CPMAnt Model with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ') class CpmAntForCausalLM(CpmAntPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: CpmAntConfig): super().__init__(config) self.cpmant = CpmAntModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size + config.prompt_types * config.prompt_length, bias=False) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutputWithPast]: """ input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Example: Text Generation with CpmAntForCausalLM. ```python >>> from transformers import CPMAntTokenizer, CpmAntForCausalLM >>> texts = "今天天气不错," >>> model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b") >>> tokenizer = CPMAntTokenizer.from_pretrained("openbmb/cpm-ant-10b") >>> input_ids = tokenizer(texts, return_tensors="pt") >>> outputs = model.generate(**input_ids) >>> output_texts = tokenizer.batch_decode(outputs) >>> print(output_texts) ['今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的'] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict model_output = self.cpmant(input_ids, output_attentions, output_hidden_states, past_key_values, use_cache, return_dict, cache_position) hidden_states = model_output.last_hidden_state if return_dict else model_output[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: loss_func = CrossEntropyLoss() loss = loss_func(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + model_output[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=model_output.past_key_values, hidden_states=model_output.hidden_states, attentions=model_output.attentions) def get_input_embeddings(self): return self.cpmant.input_embedding def set_input_embeddings(self, embeddings): self.cpmant.input_embedding = embeddings def _reorder_cache(self, past_key_values, beam_idx): past_key_values = [list(each) if each is not None else each for each in past_key_values] for key_value_layer in past_key_values: key_value_layer[0] = key_value_layer[0][beam_idx] key_value_layer[1] = key_value_layer[1][beam_idx] return past_key_values
@auto_docstring(custom_intro='\n The CPMAnt Model with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ') class CpmAntForCausalLM(CpmAntPreTrainedModel, GenerationMixin): def __init__(self, config: CpmAntConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutputWithPast]: ''' input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Example: Text Generation with CpmAntForCausalLM. ```python >>> from transformers import CPMAntTokenizer, CpmAntForCausalLM >>> texts = "今天天气不错," >>> model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b") >>> tokenizer = CPMAntTokenizer.from_pretrained("openbmb/cpm-ant-10b") >>> input_ids = tokenizer(texts, return_tensors="pt") >>> outputs = model.generate(**input_ids) >>> output_texts = tokenizer.batch_decode(outputs) >>> print(output_texts) ['今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的'] ``` ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, embeddings): pass def _reorder_cache(self, past_key_values, beam_idx): pass
8
1
15
2
8
6
2
0.66
2
7
3
0
7
2
7
8
118
18
61
30
36
40
34
18
26
6
2
1
14
1,419
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntIntermediate
import torch.nn.functional as F import torch from torch import nn from ...activations import ACT2FN class CpmAntIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class CpmAntIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
1,420
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntLayerNorm
from .configuration_cpmant import CpmAntConfig import torch.nn.functional as F from torch import nn import torch class CpmAntLayerNorm(nn.Module): """ We use Root Mean Square (RMS) Layer Normalization, please see https://huggingface.co/papers/1910.07467 for details. """ def __init__(self, config: CpmAntConfig): super().__init__() self.eps = config.eps self.dim_norm = config.hidden_size self.weight = nn.Parameter(torch.empty(config.hidden_size)) def forward(self, hidden_states: torch.Tensor): """ Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) """ if hidden_states.size(-1) != self.dim_norm: raise AssertionError('hidden_states.size(-1) != self.dim_norm') old_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True) hidden_states = (hidden_states * torch.rsqrt(variance + self.eps)).to(old_dtype) * self.weight return hidden_states
class CpmAntLayerNorm(nn.Module): ''' We use Root Mean Square (RMS) Layer Normalization, please see https://huggingface.co/papers/1910.07467 for details. ''' def __init__(self, config: CpmAntConfig): pass def forward(self, hidden_states: torch.Tensor): ''' Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) ''' pass
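A hedged reference computation for the forward pass above, i.e. RMSNorm: y = x / sqrt(mean(x^2, dim=-1) + eps) * weight. The snippet mirrors the dtype handling in the module (variance computed in float32, result cast back to the input dtype):

```python
import torch

eps, dim = 1e-6, 8  # illustrative; the module reads these from the config
x = torch.randn(2, 3, dim, dtype=torch.float16)
weight = torch.ones(dim, dtype=torch.float16)

variance = x.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
y = (x * torch.rsqrt(variance + eps)).to(x.dtype) * weight
```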
3
2
9
1
6
2
2
0.54
1
4
1
0
2
3
2
12
23
3
13
8
10
7
13
8
10
2
1
1
3
1,421
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntModel
from ...cache_utils import Cache, DynamicCache import torch.nn.functional as F from .configuration_cpmant import CpmAntConfig from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from typing import Optional, Union from ...utils import auto_docstring, logging from torch import nn import torch @auto_docstring class CpmAntModel(CpmAntPreTrainedModel): def __init__(self, config: CpmAntConfig): super().__init__(config) self.encoder = CpmAntEncoder(config) self.segment_embedding = nn.Embedding(config.segment_types, config.hidden_size) self.input_embedding = nn.Embedding(config.vocab_size + config.prompt_types * config.prompt_length, config.hidden_size) self.position_bias = CpmAntSegmentPositionEmbedding(config) self.prompt_length = config.prompt_length self.vocab_size = config.vocab_size self.post_init() def get_input_embeddings(self): return self.input_embedding def set_input_embeddings(self, embeddings, **kwargs): self.input_embedding = embeddings def _prepare_attention_mask(self, input_ids, span, context, length): batch = input_ids.size(0) seqlen = input_ids.size(1) device = input_ids.device directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(-1, 1) attention_mask = context[:, None, :] | context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen) attention_mask = attention_mask & (span[:, None, :] == span[:, :, None]) mask_1d = torch.tensor(list(range(seqlen - self.prompt_length))[::-1], device=device)[None, :].repeat(batch, 1) < length[:, None] mask_1d = torch.cat((torch.ones(batch, self.prompt_length, device=device).bool(), mask_1d), dim=1) attention_mask = mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask return attention_mask @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]: """ input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids.dtype != torch.int32: input_ids = input_ids.to(torch.int32) dtype, device = (input_ids.dtype, input_ids.device) segment = torch.where(input_ids != 0, 2, 0).to(dtype=dtype, device=device) length = (segment != 0).sum(-1).to(dtype=dtype, device=device) input_ids = torch.cat((torch.arange(self.prompt_length * 2 + self.vocab_size, self.prompt_length * 3 + self.vocab_size, dtype=dtype, device=device).repeat(input_ids.size(0), 1), input_ids), dim=1) batch, seq_length = input_ids.size() segment = torch.cat((torch.zeros(batch, self.prompt_length, dtype=dtype, device=device), segment), dim=1) context = torch.full((batch, seq_length), 1, dtype=dtype, device=device) position = torch.arange(seq_length, dtype=dtype, device=device).repeat(batch, 1) span = torch.full((batch, seq_length), 0, dtype=dtype, device=device) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `DynamicCache` instead, e.g. `past_key_values=DynamicCache.from_legacy_cache(past_key_values)`.') past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 input_ids = input_ids.contiguous() hidden_states = self.input_embedding(input_ids) segment_states = self.segment_embedding(segment) if past_length != 0: segment_states = segment_states[:, -1:, :] hidden_states = hidden_states + segment_states attention_mask = self._prepare_attention_mask(input_ids, span, context, length) position_bias = self.position_bias(position, position, segment, segment) attention_mask = attention_mask[:, past_length:, :] position_bias = position_bias[:, :, past_length:, :] hidden_states = hidden_states[:, past_length:, :] hidden_states, all_hidden_states, all_attentions = self.encoder(hidden_states, attention_mask, position_bias, output_attentions, output_hidden_states, past_key_values, use_cache, cache_position) if past_length == 0: hidden_states = hidden_states[:, self.prompt_length:, :] if all_attentions is not None: new_attentions = () for attention in all_attentions: new_attentions += (attention[:, :, self.prompt_length:, self.prompt_length:],) all_attentions = new_attentions if all_hidden_states is not None: new_hidden_states = () for hidden_state in all_hidden_states: new_hidden_states += (hidden_state[:, self.prompt_length:, :],) all_hidden_states = new_hidden_states if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions)
@auto_docstring class CpmAntModel(CpmAntPreTrainedModel): def __init__(self, config: CpmAntConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, embeddings, **kwargs): pass def _prepare_attention_mask(self, input_ids, span, context, length): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]: ''' input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ''' pass
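A hedged sketch of the directional component of the attention mask built in `_prepare_attention_mask` above: position `i` may attend to position `j` only when `j <= i`. In the full method this lower-triangular mask is combined with the context, span, and length masks:

```python
import torch

seqlen = 5  # illustrative
idx = torch.arange(seqlen)
directional_mask_2d = idx <= idx.view(-1, 1)  # (seqlen, seqlen), True where j <= i
print(directional_mask_2d.int())
# tensor([[1, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]], dtype=torch.int32)
```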
8
1
26
2
23
1
3
0.02
1
10
4
0
5
6
5
6
139
13
123
45
102
3
72
35
66
13
2
3
17
1,422
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntOutput
import torch.nn.functional as F import torch from torch import nn class CpmAntOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class CpmAntOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
1,423
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntPreTrainedModel
from torch import nn from .configuration_cpmant import CpmAntConfig from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging @auto_docstring class CpmAntPreTrainedModel(PreTrainedModel): config: CpmAntConfig base_model_prefix = 'cpmant' def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, CpmAntLayerNorm): module.weight.data.fill_(1.0) elif isinstance(module, CpmAntSegmentPositionEmbedding): module.relative_attention_bias.data.normal_(mean=0.0, std=self.config.init_std)
@auto_docstring class CpmAntPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
17
0
16
1
8
0.26
1
2
2
2
1
0
1
1
26
2
19
4
17
5
15
4
13
8
1
2
8
1,424
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntSegmentPositionEmbedding
import torch from torch import nn from .configuration_cpmant import CpmAntConfig import math import torch.nn.functional as F class CpmAntSegmentPositionEmbedding(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.num_heads = config.num_attention_heads self.num_buckets = config.position_bias_num_buckets self.max_distance = config.position_bias_max_distance self.num_segments = config.segment_types self.relative_attention_bias = nn.Parameter(torch.empty(config.segment_types * config.segment_types + config.position_bias_num_buckets, config.num_attention_heads)) def forward(self, key_pos: torch.Tensor, query_pos: torch.Tensor, key_segment: torch.Tensor, query_segment: torch.Tensor): with torch.no_grad(): batch = key_pos.size(0) keylen = key_pos.size(1) querylen = query_pos.size(1) if key_pos.size(0) != query_pos.size(0): raise AssertionError(f'key_pos.size(0) should be equal to query_pos.size(0), but got {key_pos.size(0)} and {query_pos.size(0)}!') if keylen != key_segment.size(1): raise AssertionError(f'keylen should be equal to key_segment.size(1), but got {keylen} and {key_segment.size(1)}!') if querylen != query_segment.size(1): raise AssertionError(f'querylen should be equal to query_segment.size(1), but got {querylen} and {query_segment.size(1)}!') key_pos = key_pos.view(batch, -1, keylen) query_pos = query_pos.view(batch, querylen, -1) key_segment = key_segment.view(batch, -1, keylen) query_segment = query_segment.view(batch, querylen, -1) relative_position_bucket = self._segment_relative_position_bucket(query_segment, key_segment) relative_position_bucket = relative_position_bucket + self.num_buckets absolute_position_bucket = self._position_bucket(torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[None, :] - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[:, None], num_buckets=self.num_buckets, max_distance=self.max_distance) relative_position_bucket = torch.where(key_segment == query_segment, absolute_position_bucket[None, :, :], relative_position_bucket) embeds = F.embedding(relative_position_bucket, self.relative_attention_bias) embeds = embeds.permute(0, 3, 1, 2).contiguous() return embeds def _segment_relative_position_bucket(self, query_segment, key_segment): return query_segment * self.num_segments + key_segment def _position_bucket(self, relative_position, num_buckets=32, max_distance=128): relative_buckets = 0 num_buckets //= 2 relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets relative_position = torch.abs(relative_position) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.int32) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position.to(torch.int32), relative_position_if_large) return relative_buckets
class CpmAntSegmentPositionEmbedding(nn.Module): def __init__(self, config: CpmAntConfig): pass def forward(self, key_pos: torch.Tensor, query_pos: torch.Tensor, key_segment: torch.Tensor, query_segment: torch.Tensor): pass def _segment_relative_position_bucket(self, query_segment, key_segment): pass def _position_bucket(self, relative_position, num_buckets=32, max_distance=128): pass
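The `_position_bucket` method above implements T5-style relative-position bucketing: half the buckets encode sign, nearby offsets get exact buckets, and distant offsets share logarithmically spaced ones. A self-contained sketch of the same logic (the function below is a copy for illustration, not an import):

```python
import math

import torch


def position_bucket(relative_position, num_buckets=32, max_distance=128):
    num_buckets //= 2
    buckets = (relative_position > 0).to(torch.int32) * num_buckets  # sign half
    relative_position = torch.abs(relative_position)
    max_exact = num_buckets // 2
    is_small = relative_position < max_exact  # exact buckets for near offsets
    large = max_exact + (
        torch.log(relative_position.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.int32)
    large = torch.min(large, torch.full_like(large, num_buckets - 1))
    return buckets + torch.where(is_small, relative_position.to(torch.int32), large)


print(position_bucket(torch.tensor([-100, -1, 0, 1, 100])))
# tensor([15,  1,  0, 17, 31], dtype=torch.int32)
```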
5
0
22
2
19
1
2
0.05
1
4
1
0
4
5
4
14
90
10
76
26
65
4
43
20
38
4
1
2
7
1,425
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntSelfAttentionBlock
from .configuration_cpmant import CpmAntConfig from torch import nn import torch.nn.functional as F from typing import Optional, Union from ...cache_utils import Cache, DynamicCache import torch class CpmAntSelfAttentionBlock(nn.Module): def __init__(self, config: CpmAntConfig, layer_idx=None): super().__init__() self.layernorm_before_attention = CpmAntLayerNorm(config) self.self_attention = CpmAntAttention(config, layer_idx=layer_idx) if config.dropout_p: self.dropout = torch.nn.Dropout(config.dropout_p) else: self.dropout = None def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): """ Args: hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`): Input of the transformer block (self-attention block). It can be the raw embedding of a batch of sequences. attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Avoid invalid areas to participate in the calculation of self-attention. position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Provide positional information to self-attention block. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ outputs = self.layernorm_before_attention(hidden_states) outputs, attn_weights = self.self_attention(outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache, cache_position) if self.dropout is not None: outputs = self.dropout(outputs) hidden_states = hidden_states + outputs return (hidden_states, attn_weights)
class CpmAntSelfAttentionBlock(nn.Module): def __init__(self, config: CpmAntConfig, layer_idx=None): pass def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): ''' Args: hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`): Input of the transformer block (self-attention block). It can be the raw embedding of a batch of sequences. attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Avoid invalid areas to participate in the calculation of self-attention. position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Provide positional information to self-attention block. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). ''' pass
3
1
23
2
13
8
2
0.59
1
6
3
0
2
3
2
12
47
4
27
16
16
16
16
8
13
2
1
1
4
1,426
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/modeling_cpmant.py
transformers.models.cpmant.modeling_cpmant.CpmAntTransformerBlock
import torch.nn.functional as F import torch from .configuration_cpmant import CpmAntConfig from typing import Optional, Union from ...cache_utils import Cache, DynamicCache from torch import nn class CpmAntTransformerBlock(nn.Module): def __init__(self, config: CpmAntConfig, layer_idx=None): super().__init__() self.self_att = CpmAntSelfAttentionBlock(config, layer_idx=layer_idx) self.ffn = CpmAntFFNBlock(config) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): """ Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ hidden_states, attn_weights = self.self_att(hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position) hidden_states = self.ffn(hidden_states) return (hidden_states, attn_weights)
class CpmAntTransformerBlock(nn.Module): def __init__(self, config: CpmAntConfig, layer_idx=None): pass def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): ''' Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Cache`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). ''' pass
3
1
22
2
12
8
1
0.64
1
6
3
0
2
2
2
12
45
4
25
14
14
16
10
6
7
1
1
0
2
1,427
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/tokenization_cpmant.py
transformers.models.cpmant.tokenization_cpmant.CpmAntTokenizer
from typing import Optional from ...tokenization_utils import PreTrainedTokenizer from transformers.utils import is_rjieba_available, requires_backends import os import collections class CpmAntTokenizer(PreTrainedTokenizer): """ Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding. Args: vocab_file (`str`): Path to the vocabulary file. bod_token (`str`, *optional*, defaults to `"<d>"`): The beginning of document token. eod_token (`str`, *optional*, defaults to `"</d>"`): The end of document token. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. line_token (`str`, *optional*, defaults to `"</n>"`): The line token. space_token (`str`, *optional*, defaults to `"</_>"`): The space token. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] add_prefix_space = False def __init__(self, vocab_file, bod_token='<d>', eod_token='</d>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<unk>', line_token='</n>', space_token='</_>', padding_side='left', **kwargs): requires_backends(self, ['rjieba']) self.bod_token = bod_token self.eod_token = eod_token self.encoder = load_vocab(vocab_file) self.encoder[' '] = self.encoder[space_token] self.encoder['\n'] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1])) self.decoder = {v: k for k, v in self.encoder.items()} self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token) super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs) @property def bod_token_id(self): return self.encoder[self.bod_token] @property def eod_token_id(self): return self.encoder[self.eod_token] @property def newline_id(self): return self.encoder['\n'] @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def _tokenize(self, text): """Tokenize a string.""" output_tokens = [] for x in rjieba.cut(text, False): output_tokens.extend(self.wordpiece_tokenizer.tokenize(x)) return output_tokens def _decode(self, token_ids, **kwargs): """Decode ids into a string.""" token_ids = [i for i in token_ids if i >= 0] token_ids = [x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and (x != self.bos_token_id)] return super()._decode(token_ids, **kwargs) def check(self, token): return token in self.encoder def convert_tokens_to_string(self, tokens: list[str]) -> str: return ''.join(tokens) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 
VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory index = 0 if ' ' in self.encoder: self.encoder['</_>'] = self.encoder[' '] del self.encoder[' '] if '\n' in self.encoder: self.encoder['</n>'] = self.encoder['\n'] del self.encoder['\n'] self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1])) with open(vocab_file, 'w', encoding='utf-8') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CPMAnt sequence has the following format: - single sequence: `[BOS] Sequence`. Args: token_ids_0 (`list[int]`): The first tokenized sequence that special tokens will be added. token_ids_1 (`list[int]`): The optional second tokenized sequence that special tokens will be added. Returns: `list[int]`: The model input with special tokens. """ if token_ids_1 is None: return [self.bos_token_id] + token_ids_0 return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1 def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) return [1] + [0] * len(token_ids_0)
class CpmAntTokenizer(PreTrainedTokenizer): ''' Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding. Args: vocab_file (`str`): Path to the vocabulary file. bod_token (`str`, *optional*, defaults to `"<d>"`): The beginning of document token. eod_token (`str`, *optional*, defaults to `"</d>"`): The end of document token. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. line_token (`str`, *optional*, defaults to `"</n>"`): The line token. space_token (`str`, *optional*, defaults to `"</_>"`): The space token. ''' def __init__(self, vocab_file, bod_token='<d>', eod_token='</d>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<unk>', line_token='</n>', space_token='</_>', padding_side='left', **kwargs): pass @property def bod_token_id(self): pass @property def eod_token_id(self): pass @property def newline_id(self): pass @property def vocab_size(self) -> int: pass def get_vocab(self): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _decode(self, token_ids, **kwargs): '''Decode ids into a string.''' pass def check(self, token): pass def convert_tokens_to_string(self, tokens: list[str]) -> str: pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CPMAnt sequence has the following format: - single sequence: `[BOS] Sequence`. Args: token_ids_0 (`list[int]`): The first tokenized sequence that special tokens will be added. token_ids_1 (`list[int]`): The optional second tokenized sequence that special tokens will be added. Returns: `list[int]`: The model input with special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass
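A hedged usage sketch for the special-token helpers above, with made-up token ids (the real ids depend on the vocabulary file): a single sequence becomes `[BOS] Sequence`, and the special-tokens mask marks each inserted `[BOS]` with a 1:

```python
bos_token_id = 1  # hypothetical id, for illustration only
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

single = [bos_token_id] + token_ids_0
# [1, 10, 11, 12]
pair = [bos_token_id] + token_ids_0 + [bos_token_id] + token_ids_1
# [1, 10, 11, 12, 1, 20, 21]
mask = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)
# [1, 0, 0, 0, 1, 0, 0] -- 1 marks a special token
```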
20
7
9
1
7
2
2
0.41
1
7
1
0
15
5
15
104
189
28
114
49
79
47
74
29
58
8
3
3
26
1,428
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cpmant/tokenization_cpmant.py
transformers.models.cpmant.tokenization_cpmant.WordpieceTokenizer
class WordpieceTokenizer: def __init__(self, vocab, unk_token='<unk>', max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, token): chars = list(token) if len(chars) > self.max_input_chars_per_word: return [self.unk_token] start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(cur_substr) start = end return sub_tokens
class WordpieceTokenizer: def __init__(self, vocab, unk_token='<unk>', max_input_chars_per_word=200): pass def tokenize(self, token): pass
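A hedged walk-through of the greedy longest-match loop in `tokenize` above, using the `WordpieceTokenizer` class as defined and a toy vocabulary (note that, unlike BERT's WordPiece, there is no `##` continuation prefix here):

```python
vocab = {"un", "happy", "unhap", "py"}  # illustrative only, not the CPMAnt vocab
tok = WordpieceTokenizer(vocab=vocab, unk_token="<unk>")

print(tok.tokenize("unhappy"))  # ['unhap', 'py'] -- longest prefix wins over 'un' + 'happy'
print(tok.tokenize("zq"))       # ['<unk>', '<unk>'] -- per-character fallback
```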
3
0
14
1
13
0
4
0
0
1
0
0
2
3
2
2
30
3
27
12
24
0
26
12
23
6
0
3
7
1,429
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/configuration_ctrl.py
transformers.models.ctrl.configuration_ctrl.CTRLConfig
from ...configuration_utils import PretrainedConfig class CTRLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 246534): Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`]. n_positions (`int`, *optional*, defaults to 256): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 1280): Dimensionality of the embeddings and hidden states. dff (`int`, *optional*, defaults to 8192): Dimensionality of the inner dimension of the feed forward networks (FFN). n_layer (`int`, *optional*, defaults to 48): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. layer_norm_epsilon (`float`, *optional*, defaults to 1e-06): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Examples: ```python >>> from transformers import CTRLConfig, CTRLModel >>> # Initializing a CTRL configuration >>> configuration = CTRLConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = CTRLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'ctrl' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-06, initializer_range=0.02, use_cache=True, **kwargs): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.dff = dff self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache super().__init__(**kwargs)
class CTRLConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 246534): Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`]. n_positions (`int`, *optional*, defaults to 256): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 1280): Dimensionality of the embeddings and hidden states. dff (`int`, *optional*, defaults to 8192): Dimensionality of the inner dimension of the feed forward networks (FFN). n_layer (`int`, *optional*, defaults to 48): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. layer_norm_epsilon (`float`, *optional*, defaults to 1e-06): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Examples: ```python >>> from transformers import CTRLConfig, CTRLModel >>> # Initializing a CTRL configuration >>> configuration = CTRLConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = CTRLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-06, initializer_range=0.02, use_cache=True, **kwargs): pass
2
1
29
2
27
0
1
1.17
1
1
0
0
1
11
1
1
90
12
36
30
20
42
17
16
15
1
1
0
1
1,430
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/modeling_ctrl.py
transformers.models.ctrl.modeling_ctrl.CTRLForSequenceClassification
from ...utils import auto_docstring, logging
from ...cache_utils import Cache, DynamicCache
from typing import Optional, Union
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput


@auto_docstring(custom_intro='\n    The CTRL Model transformer with a sequence classification head on top (linear layer).\n    [`CTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last\n    token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in\n    each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot\n    guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last\n    value in each row of the batch).\n    ')
class CTRLForSequenceClassification(CTRLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = CTRLModel(config)
        self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
            If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
            `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square
            loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Example of single-label classification:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CTRLForSequenceClassification
        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl")
        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits
        >>> predicted_class_id = logits.argmax().item()
        >>> model.config.id2label[predicted_class_id]
        'LABEL_0'
        ```

        ```python
        >>> import torch
        >>> torch.manual_seed(42)  # doctest: +IGNORE_RESULT
        >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
        >>> num_labels = len(model.config.id2label)
        >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)
        >>> labels = torch.tensor(1)
        >>> loss = model(**inputs, labels=labels).loss
        >>> round(loss.item(), 2)
        0.93
        ```

        Example of multi-label classification:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CTRLForSequenceClassification
        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLForSequenceClassification.from_pretrained(
        ...     "Salesforce/ctrl", problem_type="multi_label_classification"
        ... )
        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits
        >>> predicted_class_id = logits.argmax().item()
        >>> model.config.id2label[predicted_class_id]
        'LABEL_0'
        ```

        ```python
        >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
        >>> num_labels = len(model.config.id2label)
        >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)
        >>> num_labels = len(model.config.id2label)
        >>> labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to(
        ...     torch.float
        ... )
        >>> loss = model(**inputs, labels=labels).loss
        >>> loss.backward()  # doctest: +IGNORE_RESULT
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        logits = self.classifier(hidden_states)
        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[2:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
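A minimal standalone sketch of the last-non-pad-token pooling used in the forward pass above, with hypothetical ids and a hypothetical `pad_token_id`, to show why the `token_indices * non_pad_mask` trick finds the classification position in each row:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 9, 3, 0, 0], [7, 2, 0, 0, 0]])  # two right-padded rows
logits = torch.randn(2, 5, 4)  # (batch, seq_len, num_labels)

non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
# products are zero at pad positions, so argmax lands on the last real token
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)  # tensor([2, 1])

pooled_logits = logits[torch.arange(2), last_non_pad_token]  # (batch, num_labels)
```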
null
5
1
88
14
43
31
9
0.7
1
7
2
0
2
3
2
3
180
29
89
32
70
62
45
17
42
16
2
3
17
1,431
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/modeling_ctrl.py
transformers.models.ctrl.modeling_ctrl.CTRLLMHeadModel
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput
from ...generation import GenerationMixin
import torch
from ...utils import auto_docstring, logging
from ...cache_utils import Cache, DynamicCache
from torch import nn
from typing import Optional, Union


@auto_docstring(custom_intro='\n    The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input\n    embeddings).\n    ')
class CTRLLMHeadModel(CTRLPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.transformer = CTRLModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutputWithPast]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
            If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
            `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CTRLLMHeadModel
        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Wikipedia The llama is", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
        >>> sequence_ids = model.generate(inputs["input_ids"])
        >>> sequences = tokenizer.batch_decode(sequence_ids)
        >>> sequences
        ['Wikipedia The llama is a member of the family Bovidae. It is native to the Andes of Peru,']
        >>> outputs = model(**inputs, labels=inputs["input_ids"])
        >>> round(outputs.loss.item(), 2)
        9.21
        >>> list(outputs.logits.shape)
        [1, 5, 246534]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            loss = self.loss_function(lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs)
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs):
        if past_key_values is not None:
            past_length = past_key_values.get_seq_length()
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                remove_prefix_length = input_ids.shape[1] - 1
            input_ids = input_ids[:, remove_prefix_length:]
        model_inputs = {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache}
        kwargs.pop('token_type_ids', None)
        for key, value in kwargs.items():
            if key not in model_inputs:
                print(f'Warning: {key} is not a recognized input.')
                model_inputs[key] = value
        return model_inputs
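A minimal sketch of the input-trimming rule in `prepare_inputs_for_generation` above. `FakeCache` is a hypothetical stand-in exposing only the `get_seq_length()` method the rule relies on, instead of a real `DynamicCache`:

```python
import torch

class FakeCache:
    def __init__(self, past_length):
        self._n = past_length
    def get_seq_length(self):
        return self._n

input_ids = torch.tensor([[11, 12, 13, 14]])  # prompt + one newly sampled token
past = FakeCache(past_length=3)               # three tokens already cached

past_length = past.get_seq_length()
if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length        # keep only the uncached suffix
else:
    remove_prefix_length = input_ids.shape[1] - 1
print(input_ids[:, remove_prefix_length:])    # tensor([[14]])
```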
@auto_docstring(custom_intro='\n    The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input\n    embeddings).\n    ')
class CTRLLMHeadModel(CTRLPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutputWithPast]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
            If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
            `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CTRLLMHeadModel
        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Wikipedia The llama is", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
        >>> sequence_ids = model.generate(inputs["input_ids"])
        >>> sequences = tokenizer.batch_decode(sequence_ids)
        >>> sequences
        ['Wikipedia The llama is a member of the family Bovidae. It is native to the Andes of Peru,']
        >>> outputs = model(**inputs, labels=inputs["input_ids"])
        >>> round(outputs.loss.item(), 2)
        9.21
        >>> list(outputs.logits.shape)
        [1, 5, 246534]
        ```'''
        pass

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs):
        pass
6
1
22
3
13
6
2
0.44
2
6
2
0
5
2
6
7
140
25
80
36
53
35
33
17
26
5
2
2
12
1,432
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/modeling_ctrl.py
transformers.models.ctrl.modeling_ctrl.CTRLModel
from torch import nn
from typing import Optional, Union
from ...cache_utils import Cache, DynamicCache
import numpy as np
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput
import torch


@auto_docstring
class CTRLModel(CTRLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)
        self.w = nn.Embedding(config.vocab_size, config.n_embd)
        self.dropout = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop, layer_idx=i) for i in range(config.n_layer)])
        self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.post_init()

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, new_embeddings):
        self.w = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].multi_head_attention.prune_heads(heads)

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
            If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
            `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids)

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CTRLModel
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLModel.from_pretrained("Salesforce/ctrl")
        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 5, 1280]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if use_cache and isinstance(past_key_values, tuple):
            logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `DynamicCache` instead, e.g. `past_key_values=DynamicCache.from_legacy_cache(past_key_values)`.')
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
        past_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError('batch_size has to be defined and > 0')
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=self.dtype)
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
            token_type_embeds = self.w(token_type_ids)
            token_type_embeds *= np.sqrt(self.d_model_size)
        else:
            token_type_embeds = 0
        if inputs_embeds is None:
            inputs_embeds = self.w(input_ids)
        seq_len = input_shape[-1]
        mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(device)
        inputs_embeds *= np.sqrt(self.d_model_size)
        self.pos_encoding = self.pos_encoding.to(device)
        pos_embeds = self.pos_encoding[position_ids, :]
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds
        hidden_states = self.dropout(hidden_states)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, h in enumerate(self.h):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = h(hidden_states, mask, layer_past=past_key_values, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
            hidden_states = outputs[0]
            if output_attentions:
                all_attentions += (outputs[1],)
        hidden_states = self.layernorm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions] if v is not None))
        return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions)
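The `__init__` above calls a module-level `positional_encoding` helper that is not part of this snippet. A minimal sketch of a sinusoidal table with the expected `(n_positions, d_model_size)` shape; the helper in the actual file may interleave sines and cosines differently:

```python
import torch

def positional_encoding(n_positions, d_model_size, dtype):
    pos = torch.arange(n_positions, dtype=dtype).unsqueeze(1)         # (P, 1)
    i = torch.arange(d_model_size, dtype=dtype).unsqueeze(0)          # (1, D)
    angles = pos / torch.pow(10000.0, (2 * (i // 2)) / d_model_size)  # (P, D)
    enc = torch.zeros(n_positions, d_model_size, dtype=dtype)
    enc[:, 0::2] = torch.sin(angles[:, 0::2])  # even dims get sine
    enc[:, 1::2] = torch.cos(angles[:, 1::2])  # odd dims get cosine
    return enc
```

Note that the model multiplies token embeddings by `sqrt(d_model_size)` before adding this table, so the two terms are on comparable scales.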
@auto_docstring
class CTRLModel(CTRLPreTrainedModel):
    def __init__(self, config):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, new_embeddings):
        pass

    def _prune_heads(self, heads_to_prune):
        '''
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        '''
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
            If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
            `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids)

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CTRLModel
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
        >>> model = CTRLModel.from_pretrained("Salesforce/ctrl")
        >>> # CTRL was trained with control codes as the first token
        >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
        >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 5, 1280]
        ```'''
        pass
8
2
35
6
23
7
6
0.31
1
11
2
0
5
7
5
6
184
32
118
45
96
36
80
29
74
24
2
2
29
1,433
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/modeling_ctrl.py
transformers.models.ctrl.modeling_ctrl.CTRLPreTrainedModel
from .configuration_ctrl import CTRLConfig
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn


@auto_docstring
class CTRLPreTrainedModel(PreTrainedModel):
    config: CTRLConfig
    base_model_prefix = 'transformer'

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, Conv1D)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
@auto_docstring
class CTRLPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights.'''
        pass
3
1
15
0
12
3
6
0.47
1
1
1
3
1
0
1
1
24
2
15
4
13
7
13
4
11
6
1
2
6
1,434
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/modeling_ctrl.py
transformers.models.ctrl.modeling_ctrl.EncoderLayer
from torch import nn


class EncoderLayer(nn.Module):
    def __init__(self, d_model_size, num_heads, dff, rate=0.1, layer_idx=None):
        super().__init__()
        self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, layer_idx=layer_idx)
        self.ffn = point_wise_feed_forward_network(d_model_size, dff)
        self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-06)
        self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-06)
        self.dropout1 = nn.Dropout(rate)
        self.dropout2 = nn.Dropout(rate)

    def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None):
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(normed, normed, normed, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output)
        out1 = x + attn_output
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output)
        out2 = out1 + ffn_output
        outputs = (out2,) + attn_outputs[1:]
        return outputs
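`EncoderLayer` builds its MLP through a module-level `point_wise_feed_forward_network` helper that is not part of this snippet. A minimal sketch of the usual two-layer ReLU form it is assumed to return; the helper in the actual file may differ in detail:

```python
from torch import nn

# Position-wise MLP: d_model -> dff -> d_model, applied
# independently at every sequence position.
def point_wise_feed_forward_network(d_model_size, dff):
    return nn.Sequential(nn.Linear(d_model_size, dff), nn.ReLU(), nn.Linear(dff, d_model_size))
```

Note the pre-LayerNorm residual structure of the layer itself: normalization is applied before attention and before the FFN, and the un-normalized input is what flows through each residual addition.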
class EncoderLayer(nn.Module):
    def __init__(self, d_model_size, num_heads, dff, rate=0.1, layer_idx=None):
        pass

    def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None):
        pass
3
0
19
3
16
0
1
0
1
2
1
0
2
6
2
12
39
6
33
18
28
0
21
16
18
1
1
0
2
1,435
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/modeling_ctrl.py
transformers.models.ctrl.modeling_ctrl.MultiHeadAttention
from torch import nn
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer


class MultiHeadAttention(nn.Module):
    def __init__(self, d_model_size, num_heads, layer_idx=None):
        super().__init__()
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        self.layer_idx = layer_idx
        self.depth = int(d_model_size / self.num_heads)
        self.Wq = nn.Linear(d_model_size, d_model_size)
        self.Wk = nn.Linear(d_model_size, d_model_size)
        self.Wv = nn.Linear(d_model_size, d_model_size)
        self.dense = nn.Linear(d_model_size, d_model_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        attention_head_size = self.d_model_size // self.num_heads
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)
        self.Wq = prune_linear_layer(self.Wq, index)
        self.Wk = prune_linear_layer(self.Wk, index)
        self.Wv = prune_linear_layer(self.Wv, index)
        self.dense = prune_linear_layer(self.dense, index, dim=1)
        self.num_heads = self.num_heads - len(heads)
        self.d_model_size = attention_head_size * self.num_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def split_into_heads(self, x, batch_size):
        x = x.reshape(batch_size, -1, self.num_heads, self.depth)
        return x.permute([0, 2, 1, 3])

    def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None):
        batch_size = q.shape[0]
        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)
        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        if layer_past is not None:
            k, v = layer_past.update(k, v, self.layer_idx, {'cache_position': cache_position})
        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        scaled_attention = output[0].permute([0, 2, 1, 3])
        attn = output[1]
        original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
        output = self.dense(original_size_attention)
        return (output, attn)
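A shape walk-through for `split_into_heads` above, with hypothetical sizes: a `(batch, seq, d_model)` projection is reshaped to `(batch, num_heads, seq, depth)` so attention runs per head, and the inverse permute/reshape in `forward` restores `(batch, seq, d_model)` afterwards:

```python
import torch

batch_size, seq_len, d_model, num_heads = 2, 7, 16, 4
depth = d_model // num_heads
x = torch.randn(batch_size, seq_len, d_model)
x = x.reshape(batch_size, -1, num_heads, depth).permute([0, 2, 1, 3])
print(x.shape)  # torch.Size([2, 4, 7, 4])
```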
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model_size, num_heads, layer_idx=None):
        pass

    def prune_heads(self, heads):
        pass

    def split_into_heads(self, x, batch_size):
        pass

    def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None):
        pass
5
0
18
3
15
1
2
0.03
1
3
0
0
4
8
4
14
77
13
62
34
46
2
50
23
45
4
1
1
8
1,436
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/ctrl/tokenization_ctrl.py
transformers.models.ctrl.tokenization_ctrl.CTRLTokenizer
import os
import regex as re
import json
from typing import Optional
from ...tokenization_utils import PreTrainedTokenizer


class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        super().__init__(unk_token=unk_token, **kwargs)

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall('\\S+\\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merge_file)
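The `bpe` method above relies on a module-level `get_pairs` helper that is not part of this snippet. A minimal sketch of the adjacent-symbol-pair extraction it is assumed to perform (the standard form in BPE tokenizers); the helper in the actual file may differ in detail:

```python
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word,
    # where `word` is a tuple of variable-length symbols.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(('h', 'e', 'l', 'l', 'o</w>')))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}
```

Each BPE iteration then merges the lowest-ranked pair until no ranked pair remains, which is exactly the `while True` loop in `bpe`.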
class CTRLTokenizer(PreTrainedTokenizer):
    '''
    Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
    '''

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        pass

    @property
    def vocab_size(self):
        pass

    def get_vocab(self):
        pass

    def bpe(self, token):
        pass

    def _tokenize(self, text):
        '''Tokenize a string.'''
        pass

    def _convert_token_to_id(self, token):
        '''Converts a token (str) in an id using the vocab.'''
        pass

    def _convert_id_to_token(self, index):
        '''Converts an index (integer) in a token (str) using the vocab.'''
        pass

    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) in a single string.'''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
11
5
12
1
10
1
3
0.19
1
9
0
0
9
4
9
98
133
20
96
37
85
18
86
32
76
9
3
3
23
1,437
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/configuration_cvt.py
transformers.models.cvt.configuration_cvt.CvtConfig
from ...configuration_utils import PretrainedConfig


class CvtConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the CvT
    [microsoft/cvt-13](https://huggingface.co/microsoft/cvt-13) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3): The number of input channels.
        patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3]`): The kernel size of each encoder's patch embedding.
        patch_stride (`list[int]`, *optional*, defaults to `[4, 2, 2]`): The stride size of each encoder's patch embedding.
        patch_padding (`list[int]`, *optional*, defaults to `[2, 1, 1]`): The padding size of each encoder's patch embedding.
        embed_dim (`list[int]`, *optional*, defaults to `[64, 192, 384]`): Dimension of each of the encoder blocks.
        num_heads (`list[int]`, *optional*, defaults to `[1, 3, 6]`): Number of attention heads for each attention layer in each block of the Transformer encoder.
        depth (`list[int]`, *optional*, defaults to `[1, 2, 10]`): The number of layers in each encoder block.
        mlp_ratios (`list[float]`, *optional*, defaults to `[4.0, 4.0, 4.0, 4.0]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks.
        attention_drop_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`): The dropout ratio for the attention probabilities.
        drop_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`): The dropout ratio for the patch embeddings probabilities.
        drop_path_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.1]`): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
        qkv_bias (`list[bool]`, *optional*, defaults to `[True, True, True]`): The bias bool for query, key and value in attentions
        cls_token (`list[bool]`, *optional*, defaults to `[False, False, True]`): Whether or not to add a classification token to the output of each of the last 3 stages.
        qkv_projection_method (`list[string]`, *optional*, defaults to ["dw_bn", "dw_bn", "dw_bn"]`): The projection method for query, key and value Default is depth-wise convolutions with batch norm. For Linear projection use "avg".
        kernel_qkv (`list[int]`, *optional*, defaults to `[3, 3, 3]`): The kernel size for query, key and value in attention layer
        padding_kv (`list[int]`, *optional*, defaults to `[1, 1, 1]`): The padding size for key and value in attention layer
        stride_kv (`list[int]`, *optional*, defaults to `[2, 2, 2]`): The stride size for key and value in attention layer
        padding_q (`list[int]`, *optional*, defaults to `[1, 1, 1]`): The padding size for query in attention layer
        stride_q (`list[int]`, *optional*, defaults to `[1, 1, 1]`): The stride size for query in attention layer
        initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import CvtConfig, CvtModel
    >>> # Initializing a Cvt msft/cvt style configuration
    >>> configuration = CvtConfig()
    >>> # Initializing a model (with random weights) from the msft/cvt style configuration
    >>> model = CvtModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'cvt'

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=['dw_bn', 'dw_bn', 'dw_bn'], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
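Since every argument is a per-stage list, a variant architecture is configured by overriding those lists together. A hedged example; the stage sizes below are illustrative, not an official CvT variant:

```python
from transformers import CvtConfig

# All per-stage lists must have the same length (one entry per stage).
config = CvtConfig(depth=[1, 4, 16], embed_dim=[64, 192, 384], num_heads=[1, 3, 6])
print(config.depth)  # [1, 4, 16]
```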
class CvtConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the CvT
    [microsoft/cvt-13](https://huggingface.co/microsoft/cvt-13) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3): The number of input channels.
        patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3]`): The kernel size of each encoder's patch embedding.
        patch_stride (`list[int]`, *optional*, defaults to `[4, 2, 2]`): The stride size of each encoder's patch embedding.
        patch_padding (`list[int]`, *optional*, defaults to `[2, 1, 1]`): The padding size of each encoder's patch embedding.
        embed_dim (`list[int]`, *optional*, defaults to `[64, 192, 384]`): Dimension of each of the encoder blocks.
        num_heads (`list[int]`, *optional*, defaults to `[1, 3, 6]`): Number of attention heads for each attention layer in each block of the Transformer encoder.
        depth (`list[int]`, *optional*, defaults to `[1, 2, 10]`): The number of layers in each encoder block.
        mlp_ratios (`list[float]`, *optional*, defaults to `[4.0, 4.0, 4.0, 4.0]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks.
        attention_drop_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`): The dropout ratio for the attention probabilities.
        drop_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`): The dropout ratio for the patch embeddings probabilities.
        drop_path_rate (`list[float]`, *optional*, defaults to `[0.0, 0.0, 0.1]`): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
        qkv_bias (`list[bool]`, *optional*, defaults to `[True, True, True]`): The bias bool for query, key and value in attentions
        cls_token (`list[bool]`, *optional*, defaults to `[False, False, True]`): Whether or not to add a classification token to the output of each of the last 3 stages.
        qkv_projection_method (`list[string]`, *optional*, defaults to ["dw_bn", "dw_bn", "dw_bn"]`): The projection method for query, key and value Default is depth-wise convolutions with batch norm. For Linear projection use "avg".
        kernel_qkv (`list[int]`, *optional*, defaults to `[3, 3, 3]`): The kernel size for query, key and value in attention layer
        padding_kv (`list[int]`, *optional*, defaults to `[1, 1, 1]`): The padding size for key and value in attention layer
        stride_kv (`list[int]`, *optional*, defaults to `[2, 2, 2]`): The stride size for key and value in attention layer
        padding_q (`list[int]`, *optional*, defaults to `[1, 1, 1]`): The padding size for query in attention layer
        stride_q (`list[int]`, *optional*, defaults to `[1, 1, 1]`): The stride size for query in attention layer
        initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import CvtConfig, CvtModel
    >>> # Initializing a Cvt msft/cvt style configuration
    >>> configuration = CvtConfig()
    >>> # Initializing a model (with random weights) from the msft/cvt style configuration
    >>> model = CvtModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=['dw_bn', 'dw_bn', 'dw_bn'], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        pass
2
1
47
0
47
0
1
1.27
1
1
0
0
1
21
1
1
120
9
49
48
23
62
25
24
23
1
1
0
1
1,438
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtAttention
from torch import nn
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer


class CvtAttention(nn.Module):
    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token=True):
        super().__init__()
        self.attention = CvtSelfAttention(num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token)
        self.output = CvtSelfOutput(embed_dim, drop_rate)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_state, height, width):
        self_output = self.attention(hidden_state, height, width)
        attention_output = self.output(self_output, hidden_state)
        return attention_output
class CvtAttention(nn.Module):
    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token=True):
        pass

    def prune_heads(self, heads):
        pass

    def forward(self, hidden_state, height, width):
        pass
4
0
17
1
16
1
1
0.04
1
4
2
0
3
3
3
13
55
4
49
24
31
2
21
10
17
2
1
1
4
1,439
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtConvEmbeddings
from torch import nn
import collections.abc


class CvtConvEmbeddings(nn.Module):
    """
    Image to Conv Embedding.
    """

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.patch_size = patch_size
        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
        self.normalization = nn.LayerNorm(embed_dim)

    def forward(self, pixel_values):
        pixel_values = self.projection(pixel_values)
        batch_size, num_channels, height, width = pixel_values.shape
        hidden_size = height * width
        pixel_values = pixel_values.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
        if self.normalization:
            pixel_values = self.normalization(pixel_values)
        pixel_values = pixel_values.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        return pixel_values
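A worked example of the projection's output resolution, using standard convolution output-size arithmetic with the first-stage defaults from `CvtConfig` above (kernel 7, stride 4, padding 2) and an illustrative 224-pixel input:

```python
# out = floor((in + 2*padding - kernel) / stride) + 1
in_size, kernel, stride, padding = 224, 7, 4, 2
out = (in_size + 2 * padding - kernel) // stride + 1
print(out)  # 56 -> a 224x224 image becomes a 56x56 grid of patch tokens
```

Note the round trip in `forward`: the feature map is flattened to `(batch, h*w, channels)` only so LayerNorm can run over the channel dimension, then restored to `(batch, channels, h, w)`.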
class CvtConvEmbeddings(nn.Module):
    '''
    Image to Conv Embedding.
    '''

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding):
        pass

    def forward(self, pixel_values):
        pass
3
1
9
0
8
1
2
0.31
1
2
0
0
2
3
2
12
23
2
16
8
13
5
16
8
13
2
1
1
4
1,440
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtDropPath
import torch
from torch import nn
from typing import Optional, Union


class CvtDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f'p={self.drop_prob}'
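`CvtDropPath` delegates to a module-level `drop_path` function that is not part of this snippet. A minimal sketch of the usual stochastic-depth implementation (the form popularized by timm); the helper in the actual file may differ in detail:

```python
import torch

def drop_path(hidden_states, drop_prob=0.0, training=False):
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize to 0/1
    # divide by keep_prob so the expected value is unchanged
    return hidden_states.div(keep_prob) * random_tensor
```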
class CvtDropPath(nn.Module):
    '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass

    def extra_repr(self) -> str:
        pass
4
1
2
0
2
0
1
0.13
1
4
0
0
3
1
3
13
12
3
8
5
4
1
8
5
4
1
1
0
3
1,441
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtEmbeddings
from torch import nn


class CvtEmbeddings(nn.Module):
    """
    Construct the CvT embeddings.
    """

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
        super().__init__()
        self.convolution_embeddings = CvtConvEmbeddings(patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, pixel_values):
        hidden_state = self.convolution_embeddings(pixel_values)
        hidden_state = self.dropout(hidden_state)
        return hidden_state
class CvtEmbeddings(nn.Module):
    '''
    Construct the CvT embeddings.
    '''

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
        pass

    def forward(self, pixel_values):
        pass
3
1
5
0
5
0
1
0.27
1
2
1
0
2
2
2
12
16
2
11
6
8
3
9
6
6
1
1
0
2
1,442
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtEncoder
from torch import nn


class CvtEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.stages = nn.ModuleList([])
        for stage_idx in range(len(config.depth)):
            self.stages.append(CvtStage(config, stage_idx))

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_state = pixel_values
        cls_token = None
        for _, stage_module in enumerate(self.stages):
            hidden_state, cls_token = stage_module(hidden_state)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
        if not return_dict:
            return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if v is not None))
        return BaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states)
class CvtEncoder(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        pass
3
0
12
2
11
0
4
0
1
6
2
0
2
2
2
12
26
4
22
10
19
0
18
10
15
5
1
2
7
1,443
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtForImageClassification
from ...modeling_outputs import ImageClassifierOutputWithNoAttention, ModelOutput
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, logging


@auto_docstring(custom_intro='\n    Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n    the [CLS] token) e.g. for ImageNet.\n    ')
class CvtForImageClassification(CvtPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.cvt = CvtModel(config, add_pooling_layer=False)
        self.layernorm = nn.LayerNorm(config.embed_dim[-1])
        self.classifier = nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square
            loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.cvt(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        cls_token = outputs[1]
        if self.config.cls_token[-1]:
            sequence_output = self.layernorm(cls_token)
        else:
            batch_size, num_channels, height, width = sequence_output.shape
            sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1)
            sequence_output = self.layernorm(sequence_output)
        sequence_output_mean = sequence_output.mean(dim=1)
        logits = self.classifier(sequence_output_mean)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
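A hedged end-to-end sketch of inference with this head. "microsoft/cvt-13" is the checkpoint named in the `CvtConfig` docstring above and requires a download; the random pixel tensor stands in for the output of a real image processor:

```python
import torch
from transformers import CvtForImageClassification

model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")
model.eval()

# stand-in for AutoImageProcessor(images=...)["pixel_values"] on a real image
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(model.config.id2label[logits.argmax(-1).item()])
```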
@auto_docstring(custom_intro='\n    Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n    the [CLS] token) e.g. for ImageNet.\n    ')
class CvtForImageClassification(CvtPreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square
            loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        '''
        pass
5
1
37
4
29
5
8
0.14
1
6
2
0
2
4
2
3
83
9
65
23
49
9
40
16
37
13
2
3
15
1,444
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtIntermediate
from torch import nn


class CvtIntermediate(nn.Module):
    def __init__(self, embed_dim, mlp_ratio):
        super().__init__()
        self.dense = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
        self.activation = nn.GELU()

    def forward(self, hidden_state):
        hidden_state = self.dense(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class CvtIntermediate(nn.Module):
    def __init__(self, embed_dim, mlp_ratio):
        pass

    def forward(self, hidden_state):
        pass
3
0
4
0
4
0
1
0
1
2
0
0
2
2
2
12
10
1
9
5
6
0
9
5
6
1
1
0
2
1,445
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtLayer
from torch import nn


class CvtLayer(nn.Module):
    """
    CvtLayer composed by attention layers, normalization and multi-layer perceptrons (mlps).
    """

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, mlp_ratio, drop_path_rate, with_cls_token=True):
        super().__init__()
        self.attention = CvtAttention(num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token)
        self.intermediate = CvtIntermediate(embed_dim, mlp_ratio)
        self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate)
        self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_before = nn.LayerNorm(embed_dim)
        self.layernorm_after = nn.LayerNorm(embed_dim)

    def forward(self, hidden_state, height, width):
        self_attention_output = self.attention(self.layernorm_before(hidden_state), height, width)
        attention_output = self_attention_output
        attention_output = self.drop_path(attention_output)
        hidden_state = attention_output + hidden_state
        layer_output = self.layernorm_after(hidden_state)
        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output, hidden_state)
        layer_output = self.drop_path(layer_output)
        return layer_output
class CvtLayer(nn.Module):
    '''
    CvtLayer composed by attention layers, normalization and multi-layer perceptrons (mlps).
    '''

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, mlp_ratio, drop_path_rate, with_cls_token=True):
        pass

    def forward(self, hidden_state, height, width):
        pass
3
1
29
2
26
2
2
0.13
1
5
4
0
2
6
2
12
64
6
52
28
33
7
19
12
16
2
1
0
3
1,446
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtModel
from ...utils import auto_docstring, logging
import torch
from typing import Optional, Union


@auto_docstring
class CvtModel(CvtPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        """
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config
        self.encoder = CvtEncoder(config)
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCLSToken]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutputWithCLSToken(last_hidden_state=sequence_output, cls_token_value=encoder_outputs.cls_token_value, hidden_states=encoder_outputs.hidden_states)
@auto_docstring
class CvtModel(CvtPreTrainedModel):

    def __init__(self, config, add_pooling_layer=True):
        '''
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        '''
        pass

    def _prune_heads(self, heads_to_prune):
        '''
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        See base class PreTrainedModel
        '''
        pass

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None,
                return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCLSToken]:
        pass
6
2
14
1
11
1
3
0.1
1
6
2
0
3
2
3
4
52
6
42
15
25
4
19
9
15
5
2
1
8
1,447
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtOutput
from torch import nn


class CvtOutput(nn.Module):

    def __init__(self, embed_dim, mlp_ratio, drop_rate):
        super().__init__()
        self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
        self.dropout = nn.Dropout(drop_rate)

    def forward(self, hidden_state, input_tensor):
        hidden_state = self.dense(hidden_state)
        hidden_state = self.dropout(hidden_state)
        hidden_state = hidden_state + input_tensor
        return hidden_state
class CvtOutput(nn.Module):

    def __init__(self, embed_dim, mlp_ratio, drop_rate):
        pass

    def forward(self, hidden_state, input_tensor):
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
2
2
12
11
1
10
5
7
0
10
5
7
1
1
0
2
1,448
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtPreTrainedModel
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from torch import nn
from .configuration_cvt import CvtConfig


@auto_docstring
class CvtPreTrainedModel(PreTrainedModel):
    config: CvtConfig
    base_model_prefix = 'cvt'
    main_input_name = 'pixel_values'
    _no_split_modules = ['CvtLayer']

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, CvtStage):
            if self.config.cls_token[module.stage]:
                module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class CvtPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
3
1
14
0
13
1
6
0.28
1
1
1
2
1
0
1
1
25
2
18
6
16
5
14
6
12
6
1
2
6
1,449
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtSelfAttention
from torch import nn
import torch


class CvtSelfAttention(nn.Module):

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv,
                 qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token=True, **kwargs):
        super().__init__()
        self.scale = embed_dim ** (-0.5)
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.convolution_projection_query = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_q, stride_q,
            projection_method='linear' if qkv_projection_method == 'avg' else qkv_projection_method,
        )
        self.convolution_projection_key = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
        )
        self.convolution_projection_value = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
        )
        self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.dropout = nn.Dropout(attention_drop_rate)

    def rearrange_for_multi_head_attention(self, hidden_state):
        batch_size, hidden_size, _ = hidden_state.shape
        head_dim = self.embed_dim // self.num_heads
        return hidden_state.view(batch_size, hidden_size, self.num_heads, head_dim).permute(0, 2, 1, 3)

    def forward(self, hidden_state, height, width):
        if self.with_cls_token:
            cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
        batch_size, hidden_size, num_channels = hidden_state.shape
        hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        key = self.convolution_projection_key(hidden_state)
        query = self.convolution_projection_query(hidden_state)
        value = self.convolution_projection_value(hidden_state)
        if self.with_cls_token:
            query = torch.cat((cls_token, query), dim=1)
            key = torch.cat((cls_token, key), dim=1)
            value = torch.cat((cls_token, value), dim=1)
        head_dim = self.embed_dim // self.num_heads
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))
        attention_score = torch.einsum('bhlk,bhtk->bhlt', [query, key]) * self.scale
        attention_probs = torch.nn.functional.softmax(attention_score, dim=-1)
        attention_probs = self.dropout(attention_probs)
        context = torch.einsum('bhlt,bhtv->bhlv', [attention_probs, value])
        _, _, hidden_size, _ = context.shape
        context = context.permute(0, 2, 1, 3).contiguous().view(batch_size, hidden_size, self.num_heads * head_dim)
        return context
class CvtSelfAttention(nn.Module):

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv,
                 qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token=True, **kwargs):
        pass

    def rearrange_for_multi_head_attention(self, hidden_state):
        pass

    def forward(self, hidden_state, height, width):
        pass
4
0
25
3
21
1
2
0.05
1
2
1
0
3
11
3
13
79
11
65
41
47
3
41
27
37
3
1
1
6
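Editor's note: the einsum attention arithmetic in CvtSelfAttention.forward above can be checked in isolation. The following is a hedged, toy-shape sketch of the same pattern; note the real class scales scores by embed_dim ** -0.5, whereas this sketch uses the more common per-head scale, which is an assumption, not the record's behavior.

import torch

batch, seq, heads, head_dim = 2, 50, 4, 16
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)
v = torch.randn(batch, heads, seq, head_dim)

scores = torch.einsum('bhlk,bhtk->bhlt', [q, k]) * head_dim ** (-0.5)  # (batch, heads, seq, seq)
probs = torch.nn.functional.softmax(scores, dim=-1)
context = torch.einsum('bhlt,bhtv->bhlv', [probs, v])                  # (batch, heads, seq, head_dim)
context = context.permute(0, 2, 1, 3).reshape(batch, seq, heads * head_dim)
print(context.shape)  # torch.Size([2, 50, 64])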
1,450
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtSelfAttentionConvProjection
from torch import nn


class CvtSelfAttentionConvProjection(nn.Module):

    def __init__(self, embed_dim, kernel_size, padding, stride):
        super().__init__()
        self.convolution = nn.Conv2d(embed_dim, embed_dim, kernel_size=kernel_size, padding=padding,
                                     stride=stride, bias=False, groups=embed_dim)
        self.normalization = nn.BatchNorm2d(embed_dim)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class CvtSelfAttentionConvProjection(nn.Module):

    def __init__(self, embed_dim, kernel_size, padding, stride):
        pass

    def forward(self, hidden_state):
        pass
3
0
8
0
8
0
1
0
1
1
0
0
2
2
2
12
18
1
17
5
14
0
9
5
6
1
1
0
2
1,451
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtSelfAttentionLinearProjection
from torch import nn


class CvtSelfAttentionLinearProjection(nn.Module):

    def forward(self, hidden_state):
        batch_size, num_channels, height, width = hidden_state.shape
        hidden_size = height * width
        hidden_state = hidden_state.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
        return hidden_state
class CvtSelfAttentionLinearProjection(nn.Module):

    def forward(self, hidden_state):
        pass
2
0
6
0
5
1
1
0.17
1
0
0
0
1
0
1
11
7
0
6
4
4
1
6
4
4
1
1
0
1
1,452
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtSelfAttentionProjection
from torch import nn


class CvtSelfAttentionProjection(nn.Module):

    def __init__(self, embed_dim, kernel_size, padding, stride, projection_method='dw_bn'):
        super().__init__()
        if projection_method == 'dw_bn':
            self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, stride)
        self.linear_projection = CvtSelfAttentionLinearProjection()

    def forward(self, hidden_state):
        hidden_state = self.convolution_projection(hidden_state)
        hidden_state = self.linear_projection(hidden_state)
        return hidden_state
class CvtSelfAttentionProjection(nn.Module):

    def __init__(self, embed_dim, kernel_size, padding, stride, projection_method='dw_bn'):
        pass

    def forward(self, hidden_state):
        pass
3
0
5
0
5
0
2
0
1
3
2
0
2
2
2
12
11
1
10
5
7
0
10
5
7
2
1
1
3
1,453
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtSelfOutput
from torch import nn


class CvtSelfOutput(nn.Module):
    """
    The residual connection is defined in CvtLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, embed_dim, drop_rate):
        super().__init__()
        self.dense = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(drop_rate)

    def forward(self, hidden_state, input_tensor):
        hidden_state = self.dense(hidden_state)
        hidden_state = self.dropout(hidden_state)
        return hidden_state
class CvtSelfOutput(nn.Module):
    '''
    The residual connection is defined in CvtLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    '''

    def __init__(self, embed_dim, drop_rate):
        pass

    def forward(self, hidden_state, input_tensor):
        pass
3
1
4
0
4
0
1
0.44
1
1
0
0
2
2
2
12
15
2
9
5
6
4
9
5
6
1
1
0
2
1,454
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cvt/modeling_cvt.py
transformers.models.cvt.modeling_cvt.CvtStage
from torch import nn
import torch


class CvtStage(nn.Module):

    def __init__(self, config, stage):
        super().__init__()
        self.config = config
        self.stage = stage
        if self.config.cls_token[self.stage]:
            self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1]))
        self.embedding = CvtEmbeddings(
            patch_size=config.patch_sizes[self.stage],
            stride=config.patch_stride[self.stage],
            num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
            embed_dim=config.embed_dim[self.stage],
            padding=config.patch_padding[self.stage],
            dropout_rate=config.drop_rate[self.stage],
        )
        drop_path_rates = [
            x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage], device='cpu')
        ]
        self.layers = nn.Sequential(*[
            CvtLayer(
                num_heads=config.num_heads[self.stage],
                embed_dim=config.embed_dim[self.stage],
                kernel_size=config.kernel_qkv[self.stage],
                padding_q=config.padding_q[self.stage],
                padding_kv=config.padding_kv[self.stage],
                stride_kv=config.stride_kv[self.stage],
                stride_q=config.stride_q[self.stage],
                qkv_projection_method=config.qkv_projection_method[self.stage],
                qkv_bias=config.qkv_bias[self.stage],
                attention_drop_rate=config.attention_drop_rate[self.stage],
                drop_rate=config.drop_rate[self.stage],
                drop_path_rate=drop_path_rates[self.stage],
                mlp_ratio=config.mlp_ratio[self.stage],
                with_cls_token=config.cls_token[self.stage],
            )
            for _ in range(config.depth[self.stage])
        ])

    def forward(self, hidden_state):
        cls_token = None
        hidden_state = self.embedding(hidden_state)
        batch_size, num_channels, height, width = hidden_state.shape
        hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1)
        if self.config.cls_token[self.stage]:
            cls_token = self.cls_token.expand(batch_size, -1, -1)
            hidden_state = torch.cat((cls_token, hidden_state), dim=1)
        for layer in self.layers:
            layer_outputs = layer(hidden_state, height, width)
            hidden_state = layer_outputs
        if self.config.cls_token[self.stage]:
            cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
        hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        return (hidden_state, cls_token)
class CvtStage(nn.Module):

    def __init__(self, config, stage):
        pass

    def forward(self, hidden_state):
        pass
3
0
29
3
26
1
4
0.02
1
4
2
0
2
5
2
12
59
6
52
14
49
1
25
13
22
4
1
1
7
1,455
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/configuration_dab_detr.py
transformers.models.dab_detr.configuration_dab_detr.DabDetrConfig
from ...utils.backbone_utils import verify_backbone_config_arguments
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING


class DabDetrConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`DabDetrModel`]. It is used to instantiate
    a DAB-DETR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the DAB-DETR
    [IDEA-Research/dab_detr-base](https://huggingface.co/IDEA-Research/dab_detr-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_timm_backbone (`bool`, *optional*, defaults to `True`):
            Whether or not to use the `timm` library for the backbone. If set to `False`, will use the
            [`AutoBackbone`] API.
        backbone_config (`PretrainedConfig` or `dict`, *optional*):
            The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
            case it will default to `ResNetConfig()`.
        backbone (`str`, *optional*, defaults to `"resnet50"`):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If
            `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
            backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
            Whether to use pretrained weights for the backbone.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DabDetrModel`]
            can detect in a single image. For COCO, we recommend 100 queries.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Indicates whether the transformer model architecture is an encoder-decoder or not.
        activation_function (`str` or `function`, *optional*, defaults to `"prelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_size (`int`, *optional*, defaults to 256):
            This parameter is a general dimension parameter, defining dimensions for components such as the encoder
            layer and projection parameters in the decoder layer, among others.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1.0):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        dilation (`bool`, *optional*, defaults to `False`):
            Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
            `use_timm_backbone` = `True`.
        class_cost (`float`, *optional*, defaults to 2):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        cls_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the classification loss in the object detection loss function.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        temperature_height (`int`, *optional*, defaults to 20):
            Temperature parameter to tune the flatness of positional attention (HEIGHT)
        temperature_width (`int`, *optional*, defaults to 20):
            Temperature parameter to tune the flatness of positional attention (WIDTH)
        query_dim (`int`, *optional*, defaults to 4):
            Query dimension parameter represents the size of the output vector.
        random_refpoints_xy (`bool`, *optional*, defaults to `False`):
            Whether to fix the x and y coordinates of the anchor boxes with random initialization.
        keep_query_pos (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the projected positional embedding from the object query into the original query
            (key) in every decoder layer.
        num_patterns (`int`, *optional*, defaults to 0):
            Number of pattern embeddings.
        normalize_before (`bool`, *optional*, defaults to `False`):
            Whether we use a normalization layer in the Encoder or not.
        sine_position_embedding_scale (`float`, *optional*, defaults to 'None'):
            Scaling factor applied to the normalized positional encodings.
        initializer_bias_prior_prob (`float`, *optional*):
            The prior probability used by the bias initializer to initialize biases for `enc_score_head` and
            `class_embed`. If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing
            model weights.

    Examples:

    ```python
    >>> from transformers import DabDetrConfig, DabDetrModel

    >>> # Initializing a DAB-DETR IDEA-Research/dab_detr-base style configuration
    >>> configuration = DabDetrConfig()

    >>> # Initializing a model (with random weights) from the IDEA-Research/dab_detr-base style configuration
    >>> model = DabDetrModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'dab-detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads'}

    def __init__(self, use_timm_backbone=True, backbone_config=None, backbone='resnet50', use_pretrained_backbone=True,
                 backbone_kwargs=None, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048,
                 encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
                 is_encoder_decoder=True, activation_function='prelu', hidden_size=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
                 auxiliary_loss=False, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, cls_loss_coefficient=2,
                 bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, temperature_height=20,
                 temperature_width=20, query_dim=4, random_refpoints_xy=False, keep_query_pos=False, num_patterns=0,
                 normalize_before=False, sine_position_embedding_scale=None, initializer_bias_prior_prob=None,
                 **kwargs):
        if query_dim != 4:
            raise ValueError('The query dimensions has to be 4.')
        if use_timm_backbone and backbone_kwargs is None:
            backbone_kwargs = {}
            if dilation:
                backbone_kwargs['output_stride'] = 16
            backbone_kwargs['out_indices'] = [1, 2, 3, 4]
            backbone_kwargs['in_chans'] = 3
        elif not use_timm_backbone and backbone in (None, 'resnet50'):
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            backbone = None
            dilation = None
        verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone,
                                         use_pretrained_backbone=use_pretrained_backbone, backbone=backbone,
                                         backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.backbone_kwargs = backbone_kwargs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        self.query_dim = query_dim
        self.random_refpoints_xy = random_refpoints_xy
        self.keep_query_pos = keep_query_pos
        self.num_patterns = num_patterns
        self.normalize_before = normalize_before
        self.temperature_width = temperature_width
        self.temperature_height = temperature_height
        self.sine_position_embedding_scale = sine_position_embedding_scale
        self.initializer_bias_prior_prob = initializer_bias_prior_prob
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def sub_configs(self):
        return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
class DabDetrConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`DabDetrModel`]. It is used to instantiate
    a DAB-DETR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the DAB-DETR
    [IDEA-Research/dab_detr-base](https://huggingface.co/IDEA-Research/dab_detr-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_timm_backbone (`bool`, *optional*, defaults to `True`):
            Whether or not to use the `timm` library for the backbone. If set to `False`, will use the
            [`AutoBackbone`] API.
        backbone_config (`PretrainedConfig` or `dict`, *optional*):
            The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
            case it will default to `ResNetConfig()`.
        backbone (`str`, *optional*, defaults to `"resnet50"`):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If
            `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
            backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
            Whether to use pretrained weights for the backbone.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DabDetrModel`]
            can detect in a single image. For COCO, we recommend 100 queries.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Indicates whether the transformer model architecture is an encoder-decoder or not.
        activation_function (`str` or `function`, *optional*, defaults to `"prelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_size (`int`, *optional*, defaults to 256):
            This parameter is a general dimension parameter, defining dimensions for components such as the encoder
            layer and projection parameters in the decoder layer, among others.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1.0):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        dilation (`bool`, *optional*, defaults to `False`):
            Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
            `use_timm_backbone` = `True`.
        class_cost (`float`, *optional*, defaults to 2):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        cls_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the classification loss in the object detection loss function.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        temperature_height (`int`, *optional*, defaults to 20):
            Temperature parameter to tune the flatness of positional attention (HEIGHT)
        temperature_width (`int`, *optional*, defaults to 20):
            Temperature parameter to tune the flatness of positional attention (WIDTH)
        query_dim (`int`, *optional*, defaults to 4):
            Query dimension parameter represents the size of the output vector.
        random_refpoints_xy (`bool`, *optional*, defaults to `False`):
            Whether to fix the x and y coordinates of the anchor boxes with random initialization.
        keep_query_pos (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the projected positional embedding from the object query into the original query
            (key) in every decoder layer.
        num_patterns (`int`, *optional*, defaults to 0):
            Number of pattern embeddings.
        normalize_before (`bool`, *optional*, defaults to `False`):
            Whether we use a normalization layer in the Encoder or not.
        sine_position_embedding_scale (`float`, *optional*, defaults to 'None'):
            Scaling factor applied to the normalized positional encodings.
        initializer_bias_prior_prob (`float`, *optional*):
            The prior probability used by the bias initializer to initialize biases for `enc_score_head` and
            `class_embed`. If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing
            model weights.

    Examples:

    ```python
    >>> from transformers import DabDetrConfig, DabDetrModel

    >>> # Initializing a DAB-DETR IDEA-Research/dab_detr-base style configuration
    >>> configuration = DabDetrConfig()

    >>> # Initializing a model (with random weights) from the IDEA-Research/dab_detr-base style configuration
    >>> model = DabDetrModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, use_timm_backbone=True, backbone_config=None, backbone='resnet50', use_pretrained_backbone=True,
                 backbone_kwargs=None, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048,
                 encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
                 is_encoder_decoder=True, activation_function='prelu', hidden_size=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
                 auxiliary_loss=False, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, cls_loss_coefficient=2,
                 bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, temperature_height=20,
                 temperature_width=20, query_dim=4, random_refpoints_xy=False, keep_query_pos=False, num_patterns=0,
                 normalize_before=False, sine_position_embedding_scale=None, initializer_bias_prior_prob=None,
                 **kwargs):
        pass

    @property
    def sub_configs(self):
        pass
4
1
114
3
105
7
7
0.98
1
3
0
0
1
37
1
1
232
13
111
85
68
109
60
44
58
7
1
2
7
1,456
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrAttention
import torch
from typing import Optional, Union
from torch import Tensor, nn
from .configuration_dab_detr import DabDetrConfig


class DabDetrAttention(nn.Module):
    """
    Cross-Attention used in DAB-DETR 'DAB-DETR for Fast Training Convergence' paper.

    The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be
    different to v.
    """

    def __init__(self, config: DabDetrConfig, bias: bool=True, is_cross: bool=False):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size * 2 if is_cross else config.hidden_size
        self.output_dim = config.hidden_size
        self.attention_heads = config.decoder_attention_heads
        self.attention_dropout = config.attention_dropout
        self.attention_head_dim = self.embed_dim // self.attention_heads
        if self.attention_head_dim * self.attention_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `attention_heads`: {self.attention_heads}).')
        self.values_head_dim = self.output_dim // self.attention_heads
        if self.values_head_dim * self.attention_heads != self.output_dim:
            raise ValueError(f'output_dim must be divisible by attention_heads (got `output_dim`: {self.output_dim} and `attention_heads`: {self.attention_heads}).')
        self.scaling = self.attention_head_dim ** (-0.5)
        self.output_proj = nn.Linear(self.output_dim, self.output_dim, bias=bias)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None,
                key_states: Optional[torch.Tensor]=None, value_states: Optional[torch.Tensor]=None,
                output_attentions: Optional[bool]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        batch_size, q_len, _ = hidden_states.size()
        query_states = hidden_states * self.scaling
        query_states = query_states.view(batch_size, -1, self.attention_heads, self.attention_head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, -1, self.attention_heads, self.attention_head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, -1, self.attention_heads, self.values_head_dim).transpose(1, 2)
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))
        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_probs = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_probs, value_states)
        if attn_output.size() != (batch_size, self.attention_heads, q_len, self.values_head_dim):
            raise ValueError(f'`attn_output` should be of size {(batch_size, self.attention_heads, q_len, self.values_head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(batch_size, q_len, self.output_dim)
        attn_output = self.output_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights)
class DabDetrAttention(nn.Module):
    '''
    Cross-Attention used in DAB-DETR 'DAB-DETR for Fast Training Convergence' paper.

    The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be
    different to v.
    '''

    def __init__(self, config: DabDetrConfig, bias: bool=True, is_cross: bool=False):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None,
                key_states: Optional[torch.Tensor]=None, value_states: Optional[torch.Tensor]=None,
                output_attentions: Optional[bool]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
3
2
32
5
25
2
4
0.16
1
5
1
0
2
9
2
12
72
13
51
24
41
8
36
17
33
4
1
1
8
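Editor's note: DabDetrAttention above allows the query/key width to differ from the value width (embed_dim = 2 * hidden_size in the cross-attention case, while values and outputs stay at hidden_size). A toy-shape sketch of that asymmetric scaled dot-product, independent of the record; all dimensions here are made up for illustration.

import torch

batch, q_len, kv_len, heads = 2, 10, 30, 8
qk_head_dim, v_head_dim = 64, 32  # q/k live in a wider space than v

q = torch.randn(batch, heads, q_len, qk_head_dim)
k = torch.randn(batch, heads, kv_len, qk_head_dim)
v = torch.randn(batch, heads, kv_len, v_head_dim)

weights = torch.matmul(q * qk_head_dim ** (-0.5), k.transpose(2, 3)).softmax(dim=-1)
out = torch.matmul(weights, v)  # attention output inherits the value width
print(out.shape)  # torch.Size([2, 8, 10, 32])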
1,457
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrConvEncoder
import torch
from ...utils.backbone_utils import load_backbone
from torch import Tensor, nn
from .configuration_dab_detr import DabDetrConfig


class DabDetrConvEncoder(nn.Module):
    """
    Convolutional backbone, using either the AutoBackbone API or one from the timm library.

    nn.BatchNorm2d layers are replaced by DabDetrFrozenBatchNorm2d as defined above.
    """

    def __init__(self, config: DabDetrConfig):
        super().__init__()
        self.config = config
        backbone = load_backbone(config)
        with torch.no_grad():
            replace_batch_norm(backbone)
        self.model = backbone
        self.intermediate_channel_sizes = self.model.channels

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        features = self.model(pixel_values).feature_maps
        out = []
        for feature_map in features:
            mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
            out.append((feature_map, mask))
        return out
class DabDetrConvEncoder(nn.Module):
    '''
    Convolutional backbone, using either the AutoBackbone API or one from the timm library.

    nn.BatchNorm2d layers are replaced by DabDetrFrozenBatchNorm2d as defined above.
    '''

    def __init__(self, config: DabDetrConfig):
        pass

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        pass
3
1
11
2
8
2
2
0.44
1
4
1
0
2
3
2
12
30
7
16
11
13
7
16
11
13
2
1
1
3
1,458
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrConvModel
from torch import Tensor, nn


class DabDetrConvModel(nn.Module):
    """
    This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
    """

    def __init__(self, conv_encoder, position_embedding):
        super().__init__()
        self.conv_encoder = conv_encoder
        self.position_embedding = position_embedding

    def forward(self, pixel_values, pixel_mask):
        out = self.conv_encoder(pixel_values, pixel_mask)
        pos = []
        for feature_map, mask in out:
            pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
        return (out, pos)
class DabDetrConvModel(nn.Module):
    '''
    This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
    '''

    def __init__(self, conv_encoder, position_embedding):
        pass

    def forward(self, pixel_values, pixel_mask):
        pass
3
1
7
1
5
1
2
0.45
1
1
0
0
2
2
2
12
19
3
11
8
8
5
11
8
8
2
1
1
3
1,459
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrDecoder
from torch import Tensor, nn
from .configuration_dab_detr import DabDetrConfig
import torch
from typing import Optional, Union
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask


class DabDetrDecoder(DabDetrPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DabDetrDecoderLayer`].

    The decoder updates the query embeddings through multiple self-attention and cross-attention layers.

    Some small tweaks for DAB-DETR:

    - object_queries and query_position_embeddings are added to the forward pass.
    - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.

    Args:
        config: DabDetrConfig
    """

    def __init__(self, config: DabDetrConfig):
        super().__init__(config)
        self.config = config
        self.dropout = config.dropout
        self.num_layers = config.decoder_layers
        self.gradient_checkpointing = False
        self.layers = nn.ModuleList(
            [DabDetrDecoderLayer(config, is_first=layer_id == 0) for layer_id in range(config.decoder_layers)]
        )
        self.hidden_size = config.hidden_size
        self.layernorm = nn.LayerNorm(self.hidden_size)
        self.query_scale = DabDetrMLP(self.hidden_size, self.hidden_size, self.hidden_size, 2)
        self.ref_point_head = DabDetrMLP(config.query_dim // 2 * self.hidden_size, self.hidden_size, self.hidden_size, 2)
        self.bbox_embed = None
        self.ref_anchor_head = DabDetrMLP(self.hidden_size, self.hidden_size, 2, 2)
        self.post_init()

    def forward(self, inputs_embeds, encoder_hidden_states, memory_key_padding_mask, object_queries,
                query_position_embeddings, output_attentions: Optional[bool]=None,
                output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
        """
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`):
                The query embeddings that are passed into the decoder.
            encoder_hidden_states (`torch.FloatTensor` of shape `(encoder_sequence_length, batch_size, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            memory_key_padding_mask (`torch.Tensor.bool` of shape `(batch_size, sequence_length)`):
                The memory_key_padding_mask indicates which positions in the memory (encoder outputs) should be
                ignored during the attention computation, ensuring padding tokens do not influence the attention
                mechanism.
            object_queries (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each cross-attention layer.
            query_position_embeddings (`torch.FloatTensor` of shape `(num_queries, batch_size, number_of_anchor_points)`):
                Position embeddings that are added to the queries and keys in each self-attention layer.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
            input_shape = inputs_embeds.size()[:-1]
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
        intermediate = []
        reference_points = query_position_embeddings.sigmoid()
        ref_points = [reference_points]
        if encoder_hidden_states is not None and memory_key_padding_mask is not None:
            memory_key_padding_mask = _prepare_4d_attention_mask(memory_key_padding_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        for layer_id, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            obj_center = reference_points[..., :self.config.query_dim]
            query_sine_embed = gen_sine_position_embeddings(obj_center, self.hidden_size)
            query_pos = self.ref_point_head(query_sine_embed)
            pos_transformation = 1 if layer_id == 0 else self.query_scale(hidden_states)
            query_sine_embed = query_sine_embed[..., :self.hidden_size] * pos_transformation
            reference_anchor_size = self.ref_anchor_head(hidden_states).sigmoid()
            query_sine_embed[..., self.hidden_size // 2:] *= (reference_anchor_size[..., 0] / obj_center[..., 2]).unsqueeze(-1)
            query_sine_embed[..., :self.hidden_size // 2] *= (reference_anchor_size[..., 1] / obj_center[..., 3]).unsqueeze(-1)
            layer_outputs = decoder_layer(hidden_states, None, object_queries, query_pos, query_sine_embed,
                                          encoder_hidden_states, encoder_attention_mask=memory_key_padding_mask,
                                          output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if self.bbox_embed is not None:
                new_reference_points = self.bbox_embed(hidden_states)
                new_reference_points[..., :self.config.query_dim] += inverse_sigmoid(reference_points)
                new_reference_points = new_reference_points[..., :self.config.query_dim].sigmoid()
                if layer_id != self.num_layers - 1:
                    ref_points.append(new_reference_points)
                reference_points = new_reference_points.detach()
            intermediate.append(self.layernorm(hidden_states))
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        hidden_states = self.layernorm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        output_intermediate_hidden_states = torch.stack(intermediate)
        output_reference_points = torch.stack(ref_points)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, output_intermediate_hidden_states, output_reference_points] if v is not None))
        return DabDetrDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states,
                                    attentions=all_self_attns, cross_attentions=all_cross_attentions,
                                    intermediate_hidden_states=output_intermediate_hidden_states,
                                    reference_points=output_reference_points)
class DabDetrDecoder(DabDetrPreTrainedModel):
    '''
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DabDetrDecoderLayer`].

    The decoder updates the query embeddings through multiple self-attention and cross-attention layers.

    Some small tweaks for DAB-DETR:

    - object_queries and query_position_embeddings are added to the forward pass.
    - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.

    Args:
        config: DabDetrConfig
    '''

    def __init__(self, config: DabDetrConfig):
        pass

    def forward(self, inputs_embeds, encoder_hidden_states, memory_key_padding_mask, object_queries,
                query_position_embeddings, output_attentions: Optional[bool]=None,
                output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
        '''
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`):
                The query embeddings that are passed into the decoder.
            encoder_hidden_states (`torch.FloatTensor` of shape `(encoder_sequence_length, batch_size, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            memory_key_padding_mask (`torch.Tensor.bool` of shape `(batch_size, sequence_length)`):
                The memory_key_padding_mask indicates which positions in the memory (encoder outputs) should be
                ignored during the attention computation, ensuring padding tokens do not influence the attention
                mechanism.
            object_queries (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each cross-attention layer.
            query_position_embeddings (`torch.FloatTensor` of shape `(num_queries, batch_size, number_of_anchor_points)`):
                Position embeddings that are added to the queries and keys in each self-attention layer.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
3
2
93
13
62
18
10
0.36
1
9
4
0
2
11
2
3
201
32
125
42
112
45
65
32
62
19
2
3
20
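Editor's note: DabDetrDecoder.forward above calls an `inverse_sigmoid` helper (not included in this record) to refine reference anchors in logit space before re-applying sigmoid. Below is a hedged sketch of that helper as commonly written in DETR-family code; the epsilon clamping constants are an assumption, not taken from this record.

import torch

def inverse_sigmoid(x, eps=1e-5):
    # logit of x, clamped away from 0 and 1 for numerical stability (assumed eps)
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

p = torch.tensor([0.1, 0.5, 0.9])
print(torch.sigmoid(inverse_sigmoid(p)))  # ~ tensor([0.1000, 0.5000, 0.9000])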
1,460
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrDecoderLayer
import torch
from typing import Optional, Union
from .configuration_dab_detr import DabDetrConfig
from ...modeling_layers import GradientCheckpointingLayer


class DabDetrDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: DabDetrConfig, is_first: bool=False):
        super().__init__()
        self.self_attn = DabDetrDecoderLayerSelfAttention(config)
        self.cross_attn = DabDetrDecoderLayerCrossAttention(config, is_first)
        self.mlp = DabDetrDecoderLayerFFN(config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None,
                object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None,
                query_sine_embed: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None,
                encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None):
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where
                padding elements are indicated by very large negative values.
            object_queries (`torch.FloatTensor`, *optional*):
                object_queries that are added to the queries and keys in the cross-attention layer.
            query_position_embeddings (`torch.FloatTensor`, *optional*):
                object_queries that are added to the queries and keys in the self-attention layer.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
                values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states,
                                                          query_position_embeddings=query_position_embeddings,
                                                          attention_mask=attention_mask,
                                                          output_attentions=output_attentions)
        hidden_states, cross_attn_weights = self.cross_attn(hidden_states=hidden_states,
                                                            encoder_hidden_states=encoder_hidden_states,
                                                            query_position_embeddings=query_position_embeddings,
                                                            object_queries=object_queries,
                                                            encoder_attention_mask=encoder_attention_mask,
                                                            query_sine_embed=query_sine_embed,
                                                            output_attentions=output_attentions)
        hidden_states = self.mlp(hidden_states=hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs
class DabDetrDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: DabDetrConfig, is_first: bool=False):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None,
                object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None,
                query_sine_embed: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None,
                encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None):
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where
                padding elements are indicated by very large negative values.
            object_queries (`torch.FloatTensor`, *optional*):
                object_queries that are added to the queries and keys in the cross-attention layer.
            query_position_embeddings (`torch.FloatTensor`, *optional*):
                object_queries that are added to the queries and keys in the self-attention layer.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
                values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        '''
        pass
3
1
32
3
18
11
2
0.57
1
7
4
0
2
3
2
12
65
7
37
19
24
21
14
9
11
2
1
1
3
1,461
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrDecoderLayerCrossAttention
import torch
from typing import Optional, Union
from torch import Tensor, nn
from .configuration_dab_detr import DabDetrConfig


class DabDetrDecoderLayerCrossAttention(nn.Module):

    def __init__(self, config: DabDetrConfig, is_first: bool=False):
        super().__init__()
        hidden_size = config.hidden_size
        self.cross_attn_query_content_proj = nn.Linear(hidden_size, hidden_size)
        self.cross_attn_query_pos_proj = nn.Linear(hidden_size, hidden_size)
        self.cross_attn_key_content_proj = nn.Linear(hidden_size, hidden_size)
        self.cross_attn_key_pos_proj = nn.Linear(hidden_size, hidden_size)
        self.cross_attn_value_proj = nn.Linear(hidden_size, hidden_size)
        self.cross_attn_query_pos_sine_proj = nn.Linear(hidden_size, hidden_size)
        self.decoder_attention_heads = config.decoder_attention_heads
        self.cross_attn_layer_norm = nn.LayerNorm(hidden_size)
        self.cross_attn = DabDetrAttention(config, is_cross=True)
        self.keep_query_pos = config.keep_query_pos
        if not self.keep_query_pos and (not is_first):
            self.cross_attn_query_pos_proj = None
        self.is_first = is_first
        self.dropout = config.dropout

    def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None,
                query_position_embeddings: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None,
                encoder_attention_mask: Optional[torch.Tensor]=None, query_sine_embed: Optional[torch.Tensor]=None,
                output_attentions: Optional[bool]=None):
        query_content = self.cross_attn_query_content_proj(hidden_states)
        key_content = self.cross_attn_key_content_proj(encoder_hidden_states)
        value = self.cross_attn_value_proj(encoder_hidden_states)
        batch_size, num_queries, n_model = query_content.shape
        _, height_width, _ = key_content.shape
        key_pos = self.cross_attn_key_pos_proj(object_queries)
        if self.is_first or self.keep_query_pos:
            query_pos = self.cross_attn_query_pos_proj(query_position_embeddings)
            query = query_content + query_pos
            key = key_content + key_pos
        else:
            query = query_content
            key = key_content
        query = query.view(batch_size, num_queries, self.decoder_attention_heads, n_model // self.decoder_attention_heads)
        query_sine_embed = self.cross_attn_query_pos_sine_proj(query_sine_embed)
        query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.decoder_attention_heads, n_model // self.decoder_attention_heads)
        query = torch.cat([query, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2)
        key = key.view(batch_size, height_width, self.decoder_attention_heads, n_model // self.decoder_attention_heads)
        key_pos = key_pos.view(batch_size, height_width, self.decoder_attention_heads, n_model // self.decoder_attention_heads)
        key = torch.cat([key, key_pos], dim=3).view(batch_size, height_width, n_model * 2)
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states, cross_attn_weights = self.cross_attn(hidden_states=query,
                                                                attention_mask=encoder_attention_mask,
                                                                key_states=key, value_states=value,
                                                                output_attentions=output_attentions)
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.cross_attn_layer_norm(hidden_states)
        return (hidden_states, cross_attn_weights)
class DabDetrDecoderLayerCrossAttention(nn.Module):

    def __init__(self, config: DabDetrConfig, is_first: bool=False):
        pass

    def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None,
                query_position_embeddings: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None,
                encoder_attention_mask: Optional[torch.Tensor]=None, query_sine_embed: Optional[torch.Tensor]=None,
                output_attentions: Optional[bool]=None):
        pass
3
0
41
6
34
2
3
0.04
1
5
2
0
2
12
2
12
83
12
68
36
56
3
46
27
43
3
1
1
5
1,462
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrDecoderLayerFFN
import torch
from ...activations import ACT2FN
from torch import Tensor, nn
from .configuration_dab_detr import DabDetrConfig


class DabDetrDecoderLayerFFN(nn.Module):

    def __init__(self, config: DabDetrConfig):
        super().__init__()
        hidden_size = config.hidden_size
        self.final_layer_norm = nn.LayerNorm(hidden_size)
        self.fc1 = nn.Linear(hidden_size, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, hidden_size)
        self.activation_fn = ACT2FN[config.activation_function]
        self.dropout = config.dropout
        self.activation_dropout = config.activation_dropout
        self.keep_query_pos = config.keep_query_pos

    def forward(self, hidden_states: torch.Tensor):
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states
class DabDetrDecoderLayerFFN(nn.Module):

    def __init__(self, config: DabDetrConfig):
        pass

    def forward(self, hidden_states: torch.Tensor):
        pass
3
0
10
1
10
0
1
0
1
3
1
0
2
7
2
12
22
2
20
12
17
0
20
12
17
1
1
0
2
1,463
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrDecoderLayerSelfAttention
import torch
from typing import Optional, Union
from torch import Tensor, nn
from .configuration_dab_detr import DabDetrConfig


class DabDetrDecoderLayerSelfAttention(nn.Module):

    def __init__(self, config: DabDetrConfig):
        super().__init__()
        self.dropout = config.dropout
        self.self_attn_query_content_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.self_attn_query_pos_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.self_attn_key_content_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.self_attn_key_pos_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.self_attn_value_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.self_attn = DabDetrAttention(config)
        self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)

    def forward(self, hidden_states: torch.Tensor, query_position_embeddings: Optional[torch.Tensor]=None,
                attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None):
        residual = hidden_states
        query_content = self.self_attn_query_content_proj(hidden_states)
        query_pos = self.self_attn_query_pos_proj(query_position_embeddings)
        key_content = self.self_attn_key_content_proj(hidden_states)
        key_pos = self.self_attn_key_pos_proj(query_position_embeddings)
        value = self.self_attn_value_proj(hidden_states)
        query = query_content + query_pos
        key = key_content + key_pos
        hidden_states, attn_weights = self.self_attn(hidden_states=query, attention_mask=attention_mask,
                                                     key_states=key, value_states=value, output_attentions=True)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        return (hidden_states, attn_weights)
class DabDetrDecoderLayerSelfAttention(nn.Module): def __init__(self, config: DabDetrConfig): pass def forward(self, hidden_states: torch.Tensor, query_position_embeddings: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None): pass
3
0
20
2
18
0
1
0
1
5
2
0
2
8
2
12
42
5
37
26
28
0
25
20
22
1
1
0
2
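The self-attention layer above keeps content and position separate: hidden states and query position embeddings each get their own query/key projections and are summed before attention, while the values are projected from the hidden states alone. A hedged sketch of just that query/key construction (sizes assumed; in the real layer the result feeds `DabDetrAttention`):

```python
import torch
from torch import nn

hidden_size = 256  # assumed
q_content = nn.Linear(hidden_size, hidden_size)
q_pos = nn.Linear(hidden_size, hidden_size)
k_content = nn.Linear(hidden_size, hidden_size)
k_pos = nn.Linear(hidden_size, hidden_size)

hidden_states = torch.randn(2, 300, hidden_size)  # decoder embeddings (content)
query_pos_emb = torch.randn(2, 300, hidden_size)  # anchor-derived position embeddings
query = q_content(hidden_states) + q_pos(query_pos_emb)
key = k_content(hidden_states) + k_pos(query_pos_emb)
# values come from a separate projection of hidden_states only,
# so positional information never leaks into the attention output values
```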
1,464
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrDecoderOutput
from dataclasses import dataclass from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput import torch from typing import Optional, Union from ...utils import ModelOutput, auto_docstring, logging @dataclass @auto_docstring(custom_intro='\n Base class for outputs of the Conditional DETR decoder. This class adds one attribute to\n BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output\n of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary\n decoding losses.\n ') class DabDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`): Reference points (reference points of each layer of the decoder). """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of the Conditional DETR decoder. This class adds one attribute to\n BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output\n of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary\n decoding losses.\n ') class DabDetrDecoderOutput(BaseModelOutputWithCrossAttentions): ''' cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`): Reference points (reference points of each layer of the decoder). ''' pass
3
1
0
0
0
0
0
8.67
1
0
0
0
0
0
0
0
31
2
3
3
2
26
3
3
2
0
2
0
0
1,465
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrEncoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from typing import Optional, Union from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig class DabDetrEncoder(DabDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`DabDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for DAB-DETR: - object_queries are added to the forward pass. Args: config: DabDetrConfig """ def __init__(self, config: DabDetrConfig): super().__init__(config) self.dropout = config.dropout self.query_scale = DabDetrMLP(config.hidden_size, config.hidden_size, config.hidden_size, 2) self.layers = nn.ModuleList([DabDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) self.norm = nn.LayerNorm(config.hidden_size) if config.normalize_before else None self.gradient_checkpointing = False self.post_init() def forward(self, inputs_embeds, attention_mask, object_queries, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): """ Args: inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) pos_scales = self.query_scale(hidden_states) scaled_object_queries = object_queries * pos_scales layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, object_queries=scaled_object_queries, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.norm: hidden_states = self.norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class DabDetrEncoder(DabDetrPreTrainedModel): ''' Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`DabDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for DAB-DETR: - object_queries are added to the forward pass. Args: config: DabDetrConfig ''' def __init__(self, config: DabDetrConfig): pass def forward(self, inputs_embeds, attention_mask, object_queries, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): ''' Args: inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
3
2
51
9
30
13
8
0.57
1
8
4
0
2
5
2
3
117
23
60
23
49
34
36
15
33
14
2
2
16
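A distinctive step in the encoder's forward loop above is that the sinusoidal object queries are rescaled before every layer by `query_scale`, an MLP over the current hidden states. A rough sketch of that step in isolation (an `nn.Sequential` stands in for the two-layer `DabDetrMLP`; all sizes are assumptions):

```python
import torch
from torch import nn

hidden_size = 256  # assumed
query_scale = nn.Sequential(  # stand-in for the two-layer DabDetrMLP
    nn.Linear(hidden_size, hidden_size), nn.ReLU(), nn.Linear(hidden_size, hidden_size)
)

hidden_states = torch.randn(2, 950, hidden_size)   # flattened feature-map tokens
object_queries = torch.randn(2, 950, hidden_size)  # sine position embeddings
scaled_object_queries = object_queries * query_scale(hidden_states)
# each encoder layer receives the rescaled queries, recomputed from its own input
```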
1,466
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrEncoderLayer
from ...activations import ACT2FN from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig import torch from ...modeling_layers import GradientCheckpointingLayer from typing import Optional, Union class DabDetrEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: DabDetrConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = DetrAttention(config) self.self_attn_layer_norm = nn.LayerNorm(self.hidden_size) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.fc1 = nn.Linear(self.hidden_size, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.hidden_size) self.final_layer_norm = nn.LayerNorm(self.hidden_size) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor, output_attentions: Optional[bool]=None): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class DabDetrEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: DabDetrConfig): pass def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor, output_attentions: Optional[bool]=None): ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
3
1
29
4
19
6
2
0.31
1
5
2
0
2
8
2
12
59
8
39
20
30
12
28
14
25
2
1
1
3
1,467
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrForObjectDetection
from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig import torch from typing import Optional, Union from ...utils import ModelOutput, auto_docstring, logging @auto_docstring(custom_intro='\n DAB_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on\n top, for tasks such as COCO detection.\n ') class DabDetrForObjectDetection(DabDetrPreTrainedModel): _tied_weights_keys = ['bbox_predictor\\.layers\\.\\d+\\.(weight|bias)', 'model\\.decoder\\.bbox_embed\\.layers\\.\\d+\\.(weight|bias)'] def __init__(self, config: DabDetrConfig): super().__init__(config) self.config = config self.auxiliary_loss = config.auxiliary_loss self.query_dim = config.query_dim self.model = DabDetrModel(config) _bbox_embed = DabDetrMLP(config.hidden_size, config.hidden_size, 4, 3) self.class_embed = nn.Linear(config.hidden_size, config.num_labels) self.bbox_predictor = _bbox_embed self.model.decoder.bbox_embed = self.bbox_predictor self.post_init() @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): return [{'logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @auto_docstring def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[list[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], DabDetrObjectDetectionOutput]: """ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. 
Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("IDEA-Research/dab-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("IDEA-Research/dab-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([(image.height, image.width)]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict model_outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) reference_points = model_outputs.reference_points if return_dict else model_outputs[-1] intermediate_hidden_states = model_outputs.intermediate_hidden_states if return_dict else model_outputs[-2] logits = self.class_embed(intermediate_hidden_states[-1]) reference_before_sigmoid = inverse_sigmoid(reference_points) bbox_with_refinement = self.bbox_predictor(intermediate_hidden_states) bbox_with_refinement[..., :self.query_dim] += reference_before_sigmoid outputs_coord = bbox_with_refinement.sigmoid() pred_boxes = outputs_coord[-1] loss, loss_dict, auxiliary_outputs = (None, None, None) if labels is not None: outputs_class = None if self.config.auxiliary_loss: outputs_class = self.class_embed(intermediate_hidden_states) loss, loss_dict, auxiliary_outputs = self.loss_function(logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + model_outputs else: output = (logits, pred_boxes) + model_outputs return (loss, loss_dict) + output if loss is not None else output[:-2] return DabDetrObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=model_outputs.last_hidden_state, decoder_hidden_states=model_outputs.decoder_hidden_states if output_hidden_states else 
None, decoder_attentions=model_outputs.decoder_attentions if output_attentions else None, cross_attentions=model_outputs.cross_attentions if output_attentions else None, encoder_last_hidden_state=model_outputs.encoder_last_hidden_state if output_hidden_states else None, encoder_hidden_states=model_outputs.encoder_hidden_states if output_hidden_states else None, encoder_attentions=model_outputs.encoder_attentions if output_attentions else None)
null
7
1
48
7
26
15
6
0.55
1
8
4
0
3
6
3
4
157
24
86
37
67
47
40
23
36
17
2
2
19
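The heart of the detection head above is `bbox_with_refinement[..., :self.query_dim] += reference_before_sigmoid`: MLP-predicted offsets are added to the reference anchors in inverse-sigmoid (logit) space and squashed back to [0, 1]. A small numeric sketch of that refinement (`inverse_sigmoid` is reconstructed here under the usual clamping assumption, since its definition sits outside this record):

```python
import torch

def inverse_sigmoid(x, eps=1e-5):  # assumed reconstruction of the helper used above
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

reference_points = torch.tensor([[0.50, 0.50, 0.20, 0.20]])    # (cx, cy, w, h) in [0, 1]
predicted_offsets = torch.tensor([[0.40, -0.40, 0.00, 0.00]])  # from the bbox MLP
refined = (inverse_sigmoid(reference_points) + predicted_offsets).sigmoid()
print(refined)  # ~[[0.599, 0.401, 0.200, 0.200]]; boxes stay in [0, 1] by construction
```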
1,468
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrFrozenBatchNorm2d
import torch from torch import Tensor, nn class DabDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer('weight', torch.ones(n)) self.register_buffer('bias', torch.zeros(n)) self.register_buffer('running_mean', torch.zeros(n)) self.register_buffer('running_var', torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-05 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias
class DabDetrFrozenBatchNorm2d(nn.Module): ''' BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than torchvision.models.resnet[18,34,50,101] produce nans. ''' def __init__(self, n): pass def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): pass def forward(self, x): pass
4
1
9
0
8
1
1
0.28
1
1
0
0
3
0
3
13
37
5
25
13
19
7
21
11
17
2
1
1
4
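A quick numeric check of the frozen-batchnorm forward above: with identity statistics the layer is a near-identity map, and adding `epsilon` to `running_var` before `rsqrt` is what keeps the computation finite when a variance entry is zero. A minimal sketch (shapes and values are illustrative):

```python
import torch

weight, bias = torch.ones(3), torch.zeros(3)
running_mean, running_var = torch.zeros(3), torch.ones(3)
epsilon = 1e-05

x = torch.randn(1, 3, 4, 4)
scale = weight * (running_var + epsilon).rsqrt()  # eps added *before* rsqrt
out = x * scale.view(1, -1, 1, 1) + (bias - running_mean * scale).view(1, -1, 1, 1)
print(torch.allclose(out, x, atol=1e-4))  # True: identity stats give a near-identity map
```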
1,469
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrMHAttentionMap
import torch from typing import Optional, Union from torch import Tensor, nn class DabDetrMHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** (-0.5) def forward(self, q, k, mask: Optional[Tensor]=None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum('bqnc,bnchw->bqnhw', queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights = weights.masked_fill(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights
class DabDetrMHAttentionMap(nn.Module): '''This is a 2D attention module, which only returns the attention softmax (no multiplication by value)''' def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): pass def forward(self, q, k, mask: Optional[Tensor]=None): pass
3
1
11
2
10
0
2
0.05
1
3
0
0
2
6
2
12
26
5
20
12
17
1
20
12
17
2
1
1
3
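The einsum `'bqnc,bnchw->bqnhw'` above contracts per-head query channels against every spatial location of the key feature map, yielding attention maps without any value multiplication; the softmax then runs jointly over heads and spatial positions via `flatten(2)`. A shape-level sketch with made-up sizes:

```python
import torch

batch, num_queries, num_heads, head_dim, h, w = 1, 5, 8, 32, 10, 15
queries_per_head = torch.randn(batch, num_queries, num_heads, head_dim)
keys_per_head = torch.randn(batch, num_heads, head_dim, h, w)

weights = torch.einsum(
    'bqnc,bnchw->bqnhw', queries_per_head * head_dim ** -0.5, keys_per_head
)
weights = weights.flatten(2).softmax(dim=-1).view(weights.size())
print(weights.shape)  # torch.Size([1, 5, 8, 10, 15])
```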
1,470
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrMLP
from torch import Tensor, nn class DabDetrMLP(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList((nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))) def forward(self, input_tensor): for i, layer in enumerate(self.layers): input_tensor = nn.functional.relu(layer(input_tensor)) if i < self.num_layers - 1 else layer(input_tensor) return input_tensor
class DabDetrMLP(nn.Module): ''' Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py ''' def __init__(self, input_dim, hidden_dim, output_dim, num_layers): pass def forward(self, input_tensor): pass
3
1
5
0
5
0
2
0.5
1
3
0
0
2
2
2
12
19
4
10
7
7
5
10
7
7
3
1
1
4
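The record above is the small regression MLP used for box prediction: ReLU between hidden layers, no activation on the last. A self-contained re-implementation with the same structure, instantiated here as the 3-layer head that maps assumed 256-d decoder states to 4 box coordinates:

```python
import torch
from torch import nn

class MLP(nn.Module):  # same structure as DabDetrMLP above
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = torch.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x

bbox_head = MLP(256, 256, 4, 3)  # illustrative sizes
print(bbox_head(torch.randn(2, 300, 256)).shape)  # torch.Size([2, 300, 4])
```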
1,471
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrModel
from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput import torch from typing import Optional, Union from ...utils import ModelOutput, auto_docstring, logging @auto_docstring(custom_intro='\n The bare DAB-DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n hidden-states, intermediate hidden states, reference points, output coordinates without any specific head on top.\n ') class DabDetrModel(DabDetrPreTrainedModel): def __init__(self, config: DabDetrConfig): super().__init__(config) self.auxiliary_loss = config.auxiliary_loss self.backbone = DabDetrConvEncoder(config) object_queries = DabDetrSinePositionEmbedding(config) self.query_refpoint_embeddings = nn.Embedding(config.num_queries, config.query_dim) self.random_refpoints_xy = config.random_refpoints_xy if self.random_refpoints_xy: self.query_refpoint_embeddings.weight.data[:, :2].uniform_(0, 1) self.query_refpoint_embeddings.weight.data[:, :2] = inverse_sigmoid(self.query_refpoint_embeddings.weight.data[:, :2]) self.query_refpoint_embeddings.weight.data[:, :2].requires_grad = False self.input_projection = nn.Conv2d(self.backbone.intermediate_channel_sizes[-1], config.hidden_size, kernel_size=1) self.backbone = DabDetrConvModel(self.backbone, object_queries) self.encoder = DabDetrEncoder(config) self.decoder = DabDetrDecoder(config) self.hidden_size = config.hidden_size self.num_queries = config.num_queries self.num_patterns = config.num_patterns if not isinstance(self.num_patterns, int): logger.warning(f'num_patterns should be int but {type(self.num_patterns)}') self.num_patterns = 0 if self.num_patterns > 0: self.patterns = nn.Embedding(self.num_patterns, self.hidden_size) self.aux_loss = config.auxiliary_loss self.post_init() def get_encoder(self): return self.encoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @auto_docstring def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], DabDetrModelOutput]: """ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. 
Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("IDEA-Research/dab_detr-base") >>> model = AutoModel.from_pretrained("IDEA-Research/dab_detr-base") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, _, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) features, object_queries_list = self.backbone(pixel_values, pixel_mask) feature_map, mask = features[-1] if mask is None: raise ValueError('Backbone does not return downsampled pixel mask') flattened_mask = mask.flatten(1) projected_feature_map = self.input_projection(feature_map) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) reference_position_embeddings = self.query_refpoint_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) if encoder_outputs is None: encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) num_queries = reference_position_embeddings.shape[1] if self.num_patterns == 0: queries = torch.zeros(batch_size, num_queries, self.hidden_size, device=device) else: queries = self.patterns.weight[:, None, None, :].repeat(1, self.num_queries, batch_size, 1).flatten(0, 1).permute(1, 0, 2) reference_position_embeddings = reference_position_embeddings.repeat(1, self.num_patterns, 1) decoder_outputs = self.decoder(inputs_embeds=queries, query_position_embeddings=reference_position_embeddings, object_queries=object_queries, encoder_hidden_states=encoder_outputs[0], memory_key_padding_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: output = (decoder_outputs[0],) reference_points = decoder_outputs[-1] intermediate_hidden_states = decoder_outputs[-2] if output_hidden_states and output_attentions: output += (decoder_outputs[1], decoder_outputs[2], decoder_outputs[3], encoder_outputs[0], encoder_outputs[1], encoder_outputs[2]) elif output_hidden_states: output += (decoder_outputs[1], encoder_outputs[0], encoder_outputs[1]) elif output_attentions: output += (decoder_outputs[1], 
decoder_outputs[2], encoder_outputs[1]) output += (intermediate_hidden_states, reference_points) return output reference_points = decoder_outputs.reference_points intermediate_hidden_states = decoder_outputs.intermediate_hidden_states return DabDetrModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states if output_hidden_states else None, decoder_attentions=decoder_outputs.attentions if output_attentions else None, cross_attentions=decoder_outputs.cross_attentions if output_attentions else None, encoder_last_hidden_state=encoder_outputs.last_hidden_state if output_hidden_states else None, encoder_hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, encoder_attentions=encoder_outputs.attentions if output_attentions else None, intermediate_hidden_states=intermediate_hidden_states, reference_points=reference_points)
null
8
1
37
6
25
8
5
0.31
1
13
8
0
6
12
6
7
232
38
150
49
130
46
75
37
68
21
2
2
31
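One initialization detail worth pulling out of the model record above: with `random_refpoints_xy` set, the x/y components of the anchor embeddings are drawn uniformly in [0, 1] and then pushed through an inverse sigmoid, so that a later `sigmoid` recovers valid normalized coordinates. A sketch of that trick (`inverse_sigmoid` is an assumed reconstruction; 300 queries and 4-d anchors are illustrative):

```python
import torch
from torch import nn

def inverse_sigmoid(x, eps=1e-5):  # assumed reconstruction of the helper
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

query_refpoint_embeddings = nn.Embedding(300, 4)  # num_queries x (cx, cy, w, h)
query_refpoint_embeddings.weight.data[:, :2].uniform_(0, 1)
query_refpoint_embeddings.weight.data[:, :2] = inverse_sigmoid(
    query_refpoint_embeddings.weight.data[:, :2]
)
print(query_refpoint_embeddings.weight.data[:3, :2].sigmoid())  # back in [0, 1]
```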
1,472
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrObjectDetectionOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n    Output type of [`DabDetrForObjectDetection`].\n    ') class DabDetrObjectDetectionOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DabDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None auxiliary_outputs: Optional[list[dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n    Output type of [`DabDetrForObjectDetection`].\n    ') class DabDetrObjectDetectionOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DabDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. ''' pass
3
1
0
0
0
0
0
3.46
1
0
0
0
0
0
0
0
60
2
13
13
12
45
13
13
12
0
1
0
0
1,473
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrPreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging from ...modeling_utils import PreTrainedModel import math from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig @auto_docstring class DabDetrPreTrainedModel(PreTrainedModel): config: DabDetrConfig base_model_prefix = 'model' main_input_name = 'pixel_values' _no_split_modules = ['DabDetrConvEncoder', 'DabDetrEncoderLayer', 'DabDetrDecoderLayer'] def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, DabDetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, DabDetrForObjectDetection): nn.init.constant_(module.bbox_predictor.layers[-1].weight.data, 0) nn.init.constant_(module.bbox_predictor.layers[-1].bias.data, 0) prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1) bias_value = -math.log((1 - prior_prob) / prior_prob) module.class_embed.bias.data.fill_(bias_value) elif isinstance(module, nn.PReLU): module.reset_parameters()
@auto_docstring class DabDetrPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
27
2
22
3
7
0.11
1
2
2
4
1
0
1
1
33
3
27
10
25
3
25
10
23
7
1
2
7
1,474
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DabDetrSinePositionEmbedding
import math import torch from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig class DabDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__(self, config: DabDetrConfig): super().__init__() self.config = config self.embedding_dim = config.hidden_size / 2 self.temperature_height = config.temperature_height self.temperature_width = config.temperature_width scale = config.sine_position_embedding_scale if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError('No pixel mask provided') y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) y_embed = y_embed / (y_embed[:, -1:, :] + 1e-06) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-06) * self.scale dim_tx = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_tx //= 2 dim_tx.mul_(2 / self.embedding_dim) dim_tx.copy_(self.temperature_width ** dim_tx) pos_x = x_embed[:, :, :, None] / dim_tx dim_ty = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_ty //= 2 dim_ty.mul_(2 / self.embedding_dim) dim_ty.copy_(self.temperature_height ** dim_ty) pos_y = y_embed[:, :, :, None] / dim_ty pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos
class DabDetrSinePositionEmbedding(nn.Module): ''' This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. ''' def __init__(self, config: DabDetrConfig): pass def forward(self, pixel_values, pixel_mask): pass
3
1
19
2
16
2
2
0.25
1
3
1
0
2
5
2
12
45
5
32
16
29
8
32
16
29
2
1
1
4
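The record above is the 2D generalization of the Transformer sinusoidal encoding: cumulative sums over the pixel mask give normalized y/x coordinates, scaled by 2π and divided by a temperature-powered frequency ladder, with sin/cos interleaved per frequency. A simplified sketch for an all-valid mask, using one shared temperature and assumed sizes (the real module keeps separate width/height temperatures and uses in-place ops):

```python
import math
import torch

embedding_dim, temperature = 128, 10000  # half of hidden_size per axis; assumed values
mask = torch.ones(1, 8, 8)  # all pixels valid
y_embed = mask.cumsum(1) / (mask.cumsum(1)[:, -1:, :] + 1e-6) * (2 * math.pi)
x_embed = mask.cumsum(2) / (mask.cumsum(2)[:, :, -1:] + 1e-6) * (2 * math.pi)

dim_t = torch.arange(embedding_dim, dtype=torch.float32)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / embedding_dim)
pos_x = x_embed[..., None] / dim_t
pos_y = y_embed[..., None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
print(pos.shape)  # torch.Size([1, 256, 8, 8]) -> (batch, hidden_size, height, width)
```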
1,475
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dab_detr/modeling_dab_detr.py
transformers.models.dab_detr.modeling_dab_detr.DetrAttention
import torch from typing import Optional, Union from torch import Tensor, nn from .configuration_dab_detr import DabDetrConfig class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__(self, config: DabDetrConfig, bias: bool=True): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.encoder_attention_heads self.attention_dropout = config.attention_dropout self.head_dim = self.hidden_size // self.num_heads if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.scaling = self.head_dim ** (-0.5) self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, q_len, embed_dim = hidden_states.size() if object_queries is not None: hidden_states_original = hidden_states hidden_states = hidden_states + object_queries query_states = self.q_proj(hidden_states) * self.scaling key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states_original) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, embed_dim) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
class DetrAttention(nn.Module): ''' Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). ''' def __init__(self, config: DabDetrConfig, bias: bool=True): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
3
2
34
5
28
1
4
0.11
1
5
1
0
2
10
2
12
75
12
57
31
43
6
40
20
37
5
1
1
7
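The defining asymmetry in the attention record above: `object_queries` (position embeddings) are added to the hidden states before the query and key projections, while the value projection sees the original, position-free hidden states. A stripped-down sketch of that asymmetry, with the linear projections dropped for brevity (all shapes assumed):

```python
import torch

batch, seq, heads, head_dim = 2, 950, 8, 32
hidden = torch.randn(batch, seq, heads * head_dim)
pos = torch.randn(batch, seq, heads * head_dim)  # object queries / position embeddings

def split(x):  # (batch, seq, heads*dim) -> (batch, heads, seq, dim)
    return x.view(batch, seq, heads, head_dim).transpose(1, 2)

q = split(hidden + pos) * head_dim ** -0.5  # queries and keys see content + position
k = split(hidden + pos)
v = split(hidden)                           # values see content only
attn = torch.softmax(q @ k.transpose(-2, -1), dim=-1)
out = (attn @ v).transpose(1, 2).reshape(batch, seq, heads * head_dim)
print(out.shape)  # torch.Size([2, 950, 256])
```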
1,476
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/configuration_dac.py
transformers.models.dac.configuration_dac.DacConfig
import math import numpy as np from ...configuration_utils import PretrainedConfig class DacConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`DacModel`]. It is used to instantiate a Dac model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [descript/dac_16khz](https://huggingface.co/descript/dac_16khz) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: encoder_hidden_size (`int`, *optional*, defaults to 64): Intermediate representation dimension for the encoder. downsampling_ratios (`list[int]`, *optional*, defaults to `[2, 4, 8, 8]`): Ratios for downsampling in the encoder. These are used in reverse order for upsampling in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 1536): Intermediate representation dimension for the decoder. n_codebooks (`int`, *optional*, defaults to 9): Number of codebooks in the VQVAE. codebook_size (`int`, *optional*, defaults to 1024): Number of discrete codes in each codebook. codebook_dim (`int`, *optional*, defaults to 8): Dimension of the codebook vectors. If not defined, uses `encoder_hidden_size`. quantizer_dropout (`bool`, *optional*, defaults to 0): Whether to apply dropout to the quantizer. commitment_loss_weight (float, *optional*, defaults to 0.25): Weight of the commitment loss term in the VQVAE loss function. codebook_loss_weight (float, *optional*, defaults to 1.0): Weight of the codebook loss term in the VQVAE loss function. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz). Example: ```python >>> from transformers import DacModel, DacConfig >>> # Initializing a "descript/dac_16khz" style configuration >>> configuration = DacConfig() >>> # Initializing a model (with random weights) from the "descript/dac_16khz" style configuration >>> model = DacModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'dac' def __init__(self, encoder_hidden_size=64, downsampling_ratios=[2, 4, 8, 8], decoder_hidden_size=1536, n_codebooks=9, codebook_size=1024, codebook_dim=8, quantizer_dropout=0, commitment_loss_weight=0.25, codebook_loss_weight=1.0, sampling_rate=16000, **kwargs): self.encoder_hidden_size = encoder_hidden_size self.downsampling_ratios = downsampling_ratios self.decoder_hidden_size = decoder_hidden_size self.upsampling_ratios = downsampling_ratios[::-1] self.n_codebooks = n_codebooks self.codebook_size = codebook_size self.codebook_dim = codebook_dim self.quantizer_dropout = quantizer_dropout self.sampling_rate = sampling_rate self.hidden_size = encoder_hidden_size * 2 ** len(downsampling_ratios) self.hop_length = int(np.prod(downsampling_ratios)) self.commitment_loss_weight = commitment_loss_weight self.codebook_loss_weight = codebook_loss_weight super().__init__(**kwargs) @property def frame_rate(self) -> int: hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length)
class DacConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`DacModel`]. It is used to instantiate a Dac model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [descript/dac_16khz](https://huggingface.co/descript/dac_16khz) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: encoder_hidden_size (`int`, *optional*, defaults to 64): Intermediate representation dimension for the encoder. downsampling_ratios (`list[int]`, *optional*, defaults to `[2, 4, 8, 8]`): Ratios for downsampling in the encoder. These are used in reverse order for upsampling in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 1536): Intermediate representation dimension for the decoder. n_codebooks (`int`, *optional*, defaults to 9): Number of codebooks in the VQVAE. codebook_size (`int`, *optional*, defaults to 1024): Number of discrete codes in each codebook. codebook_dim (`int`, *optional*, defaults to 8): Dimension of the codebook vectors. If not defined, uses `encoder_hidden_size`. quantizer_dropout (`bool`, *optional*, defaults to 0): Whether to apply dropout to the quantizer. commitment_loss_weight (float, *optional*, defaults to 0.25): Weight of the commitment loss term in the VQVAE loss function. codebook_loss_weight (float, *optional*, defaults to 1.0): Weight of the codebook loss term in the VQVAE loss function. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz). Example: ```python >>> from transformers import DacModel, DacConfig >>> # Initializing a "descript/dac_16khz" style configuration >>> configuration = DacConfig() >>> # Initializing a model (with random weights) from the "descript/dac_16khz" style configuration >>> model = DacModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, encoder_hidden_size=64, downsampling_ratios=[2, 4, 8, 8], decoder_hidden_size=1536, n_codebooks=9, codebook_size=1024, codebook_dim=8, quantizer_dropout=0, commitment_loss_weight=0.25, codebook_loss_weight=1.0, sampling_rate=16000, **kwargs): pass @property def frame_rate(self) -> int: pass
4
1
17
2
16
0
1
1.12
1
2
0
0
2
13
2
2
84
12
34
32
17
38
20
18
17
1
1
0
2
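Two of the config's fields above are derived rather than passed in: `hop_length` is the product of the downsampling ratios and `hidden_size` doubles once per stage, while the `frame_rate` property divides the sampling rate by the hop and rounds up. A worked check with the default values:

```python
import math
import numpy as np

downsampling_ratios = [2, 4, 8, 8]
sampling_rate, encoder_hidden_size = 16000, 64  # defaults from the config above
hop_length = int(np.prod(downsampling_ratios))                     # 2 * 4 * 8 * 8 = 512
frame_rate = math.ceil(sampling_rate / hop_length)                 # ceil(31.25) = 32
hidden_size = encoder_hidden_size * 2 ** len(downsampling_ratios)  # 64 * 16 = 1024
print(hop_length, frame_rate, hidden_size)                         # 512 32 1024
```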
1,477
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/feature_extraction_dac.py
transformers.models.dac.feature_extraction_dac.DacFeatureExtractor
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from typing import Optional, Union from ...feature_extraction_utils import BatchFeature import numpy as np from ...utils import PaddingStrategy, TensorType, logging class DacFeatureExtractor(SequenceFeatureExtractor): """ Constructs a Dac feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used for padding. hop_length (`int`, *optional*, defaults to 512): Overlap length between successive windows. """ model_input_names = ['input_values', 'n_quantizers'] def __init__(self, feature_size: int=1, sampling_rate: int=16000, padding_value: float=0.0, hop_length: int=512, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.hop_length = hop_length def __call__(self, raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Optional[Union[bool, str, PaddingStrategy]]=None, truncation: Optional[bool]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of lists of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to 'pt'): If set, will return tensors instead of a list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors.
""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided audio input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.') if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.') elif padding is None: padding = True is_batched = bool(isinstance(raw_audio, (list, tuple)) and isinstance(raw_audio[0], (np.ndarray, tuple, list))) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and (not isinstance(raw_audio, np.ndarray)): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) if not is_batched: raw_audio = [np.asarray(raw_audio).T] for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}') if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels') if self.feature_size == 2: raise ValueError("Stereo audio isn't supported for now") input_values = BatchFeature({'input_values': raw_audio}) padded_inputs = self.pad(input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, pad_to_multiple_of=self.hop_length) if padding: padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask') if padding: padded_inputs.input_values = padded_inputs.input_values[:, np.newaxis, :] input_values = [] for example in padded_inputs.pop('input_values'): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs['input_values'] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
class DacFeatureExtractor(SequenceFeatureExtractor): ''' Constructs an Dac feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used for padding. hop_length (`int`, *optional*, defaults to 512): Overlap length between successive windows. ''' def __init__(self, feature_size: int=1, sampling_rate: int=16000, padding_value: float=0.0, hop_length: int=512, **kwargs): pass def __call__(self, raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Optional[Union[bool, str, PaddingStrategy]]=None, truncation: Optional[bool]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None) -> BatchFeature: ''' Main method to featurize and prepare for the model one or several sequence(s). Args: raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*, default to 'pt'): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. ''' pass
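The record above pads raw audio to a multiple of `hop_length` before batching. A minimal numpy sketch of that behaviour, with a hypothetical `pad_to_hop_multiple` helper (the helper name and the 16 kHz example are illustrative, not part of the record):

import numpy as np

def pad_to_hop_multiple(audio: np.ndarray, hop_length: int=512, padding_value: float=0.0):
    '''Hypothetical helper mirroring the extractor's pad-to-multiple behaviour.'''
    length = audio.shape[-1]
    target = -(-length // hop_length) * hop_length  # ceiling division
    padded = np.full(target, padding_value, dtype=np.float32)
    padded[:length] = audio
    padding_mask = np.zeros(target, dtype=np.int64)
    padding_mask[:length] = 1  # 1 marks real samples, 0 marks padding
    return (padded, padding_mask)

audio = np.random.randn(16000).astype(np.float32)  # 1 s of mono audio at 16 kHz
padded, mask = pad_to_hop_multiple(audio)
assert padded.shape[-1] == 16384  # next multiple of 512 above 16000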
3
2
61
7
37
17
9
0.64
1
10
1
0
2
1
2
19
142
19
75
24
57
48
40
9
37
17
3
2
18
1,478
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacDecoder
from .configuration_dac import DacConfig
import torch.nn as nn

class DacDecoder(nn.Module):
    """DAC Decoder"""

    def __init__(self, config: DacConfig):
        super().__init__()
        input_channel = config.hidden_size
        channels = config.decoder_hidden_size
        strides = config.upsampling_ratios

        self.conv1 = nn.Conv1d(input_channel, channels, kernel_size=7, padding=3)

        block = []
        for stride_index, stride in enumerate(strides):
            block += [DacDecoderBlock(config, stride, stride_index)]
        self.block = nn.ModuleList(block)

        output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1)
        self.snake1 = Snake1d(output_dim)
        self.conv2 = nn.Conv1d(output_dim, 1, kernel_size=7, padding=3)
        self.tanh = nn.Tanh()

    def forward(self, hidden_state):
        hidden_state = self.conv1(hidden_state)

        for layer in self.block:
            hidden_state = layer(hidden_state)

        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv2(hidden_state)
        hidden_state = self.tanh(hidden_state)

        return hidden_state
class DacDecoder(nn.Module):
    '''DAC Decoder'''

    def __init__(self, config: DacConfig):
        pass

    def forward(self, hidden_state):
        pass
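The decoder's total upsampling factor is the product of `config.upsampling_ratios`, since each `DacDecoderBlock` stretches the sequence by its stride. A quick arithmetic sketch, assuming the ratios `[8, 8, 4, 2]` (the library's documented defaults, not values read from this record):

import math

upsampling_ratios = [8, 8, 4, 2]  # assumed default ratios
latent_steps = 64
audio_samples = latent_steps * math.prod(upsampling_ratios)
print(audio_samples)  # 64 * 512 = 32768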
3
1
16
4
11
1
2
0.13
1
5
3
0
2
5
2
12
35
9
23
15
20
3
23
15
20
2
1
1
4
1,479
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacDecoderBlock
import math
import torch.nn as nn
from .configuration_dac import DacConfig

class DacDecoderBlock(nn.Module):
    """Decoder block used in DAC decoder."""

    def __init__(self, config: DacConfig, stride: int=1, stride_index: int=1):
        super().__init__()
        input_dim = config.decoder_hidden_size // 2 ** stride_index
        output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1)
        self.snake1 = Snake1d(input_dim)
        self.conv_t1 = nn.ConvTranspose1d(input_dim, output_dim, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))
        self.res_unit1 = DacResidualUnit(output_dim, dilation=1)
        self.res_unit2 = DacResidualUnit(output_dim, dilation=3)
        self.res_unit3 = DacResidualUnit(output_dim, dilation=9)

    def forward(self, hidden_state):
        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv_t1(hidden_state)
        hidden_state = self.res_unit1(hidden_state)
        hidden_state = self.res_unit2(hidden_state)
        hidden_state = self.res_unit3(hidden_state)

        return hidden_state
class DacDecoderBlock(nn.Module):
    '''Decoder block used in DAC decoder.'''

    def __init__(self, config: DacConfig, stride: int=1, stride_index: int=1):
        pass

    def forward(self, hidden_state):
        pass
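The `ConvTranspose1d` in this block uses `kernel_size=2*stride` and `padding=ceil(stride/2)`, which for an even stride upsamples the length exactly by `stride`. A small check of that geometry (the helper name is illustrative):

import math

def conv_transpose1d_out_len(l_in: int, kernel_size: int, stride: int, padding: int) -> int:
    # standard ConvTranspose1d length formula (dilation 1, no output_padding)
    return (l_in - 1) * stride - 2 * padding + kernel_size

stride = 8
assert conv_transpose1d_out_len(100, 2 * stride, stride, math.ceil(stride / 2)) == 800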
3
1
13
2
11
0
1
0.04
1
5
3
0
2
5
2
12
29
5
23
10
20
1
17
10
14
1
1
0
2
1,480
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacDecoderOutput
from typing import Optional
from ...utils import ModelOutput, auto_docstring
import torch
import torch.nn as nn
from dataclasses import dataclass
import torch.nn.functional as F

@dataclass
@auto_docstring
class DacDecoderOutput(ModelOutput):
    """
    audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*):
        Decoded audio values, obtained using the decoder part of Dac.
    """

    audio_values: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring
class DacDecoderOutput(ModelOutput):
    '''
    audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*):
        Decoded audio values, obtained using the decoder part of Dac.
    '''
    pass
3
1
0
0
0
0
0
2.5
1
0
0
0
0
0
0
0
8
1
2
2
1
5
2
2
1
0
1
0
0
1,481
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacEncoder
import torch.nn as nn
from .configuration_dac import DacConfig

class DacEncoder(nn.Module):
    """DAC Encoder"""

    def __init__(self, config: DacConfig):
        super().__init__()
        strides = config.downsampling_ratios
        self.conv1 = nn.Conv1d(1, config.encoder_hidden_size, kernel_size=7, padding=3)
        self.block = []

        for stride_index, stride in enumerate(strides):
            stride_index = stride_index + 1
            self.block += [DacEncoderBlock(config, stride=stride, stride_index=stride_index)]

        self.block = nn.ModuleList(self.block)
        d_model = config.encoder_hidden_size * 2 ** stride_index
        self.snake1 = Snake1d(d_model)
        self.conv2 = nn.Conv1d(d_model, config.hidden_size, kernel_size=3, padding=1)

    def forward(self, hidden_state):
        hidden_state = self.conv1(hidden_state)

        for module in self.block:
            hidden_state = module(hidden_state)

        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv2(hidden_state)

        return hidden_state
class DacEncoder(nn.Module):
    '''DAC Encoder'''

    def __init__(self, config: DacConfig):
        pass

    def forward(self, hidden_state):
        pass
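Mirroring the decoder, the encoder compresses the waveform by the product of `config.downsampling_ratios`, which is why the feature extractor pads inputs to a multiple of `hop_length`. A quick sketch, again assuming the documented defaults `[2, 4, 8, 8]` (product 512):

import math

downsampling_ratios = [2, 4, 8, 8]  # assumed default ratios
samples = 32768
latent_steps = samples // math.prod(downsampling_ratios)
print(latent_steps)  # 32768 / 512 = 64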
3
1
14
3
10
1
2
0.15
1
5
3
0
2
4
2
12
31
8
20
11
17
3
20
11
17
2
1
1
4
1,482
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacEncoderBlock
import math
import torch.nn as nn
from .configuration_dac import DacConfig

class DacEncoderBlock(nn.Module):
    """Encoder block used in DAC encoder."""

    def __init__(self, config: DacConfig, stride: int=1, stride_index: int=1):
        super().__init__()
        dimension = config.encoder_hidden_size * 2 ** stride_index
        self.res_unit1 = DacResidualUnit(dimension // 2, dilation=1)
        self.res_unit2 = DacResidualUnit(dimension // 2, dilation=3)
        self.res_unit3 = DacResidualUnit(dimension // 2, dilation=9)
        self.snake1 = Snake1d(dimension // 2)
        self.conv1 = nn.Conv1d(dimension // 2, dimension, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))

    def forward(self, hidden_state):
        hidden_state = self.res_unit1(hidden_state)
        hidden_state = self.res_unit2(hidden_state)
        hidden_state = self.snake1(self.res_unit3(hidden_state))
        hidden_state = self.conv1(hidden_state)

        return hidden_state
class DacEncoderBlock(nn.Module):
    '''Encoder block used in DAC encoder.'''

    def __init__(self, config: DacConfig, stride: int=1, stride_index: int=1):
        pass

    def forward(self, hidden_state):
        pass
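The strided `Conv1d` here downsamples by `stride` for even strides, per the usual length formula. A small check (the helper name is illustrative):

import math

def conv1d_out_len(l_in: int, kernel_size: int, stride: int, padding: int) -> int:
    # standard Conv1d length formula (dilation 1)
    return (l_in + 2 * padding - kernel_size) // stride + 1

stride = 4
assert conv1d_out_len(800, 2 * stride, stride, math.ceil(stride / 2)) == 200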
3
1
9
1
8
0
1
0.06
1
5
3
0
2
5
2
12
22
4
17
9
14
1
15
9
12
1
1
0
2
1,483
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacEncoderOutput
import torch.nn.functional as F
from ...utils import ModelOutput, auto_docstring
from typing import Optional
import torch
from dataclasses import dataclass
import torch.nn as nn

@dataclass
@auto_docstring
class DacEncoderOutput(ModelOutput):
    """
    loss (`torch.Tensor`):
        Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
    quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
        Quantized continuous representation of input.
    audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
        Codebook indices for each codebook (quantized discrete representation of input).
    projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`, *optional*):
        Projected latents (continuous representation of input before quantization).
    """

    loss: Optional[torch.FloatTensor] = None
    quantized_representation: Optional[torch.FloatTensor] = None
    audio_codes: Optional[torch.FloatTensor] = None
    projected_latents: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring
class DacEncoderOutput(ModelOutput):
    '''
    loss (`torch.Tensor`):
        Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
    quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
        Quantized continuous representation of input.
    audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
        Codebook indices for each codebook (quantized discrete representation of input).
    projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`, *optional*):
        Projected latents (continuous representation of input before quantization).
    '''
    pass
3
1
0
0
0
0
0
2.2
1
0
0
0
0
0
0
0
17
1
5
5
4
11
5
5
4
0
1
0
0
1,484
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacModel
import torch.nn as nn
import torch
import math
from .configuration_dac import DacConfig
from typing import Optional
from ...utils import ModelOutput, auto_docstring
import torch.nn.functional as F

@auto_docstring(custom_intro='\n    The DAC (Descript Audio Codec) model.\n    ')
class DacModel(DacPreTrainedModel):

    def __init__(self, config: DacConfig):
        super().__init__(config)
        self.config = config

        self.encoder = DacEncoder(config)
        self.decoder = DacDecoder(config)

        self.quantizer = DacResidualVectorQuantize(config)

        self.bits_per_codebook = int(math.log2(self.config.codebook_size))
        if 2 ** self.bits_per_codebook != self.config.codebook_size:
            raise ValueError('The codebook_size must be a power of 2.')

        self.post_init()

    @auto_docstring
    def encode(self, input_values: torch.Tensor, n_quantizers: Optional[int]=None, return_dict: Optional[bool]=None):
        """
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Input audio data to encode.
        n_quantizers (`int`, *optional*):
            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.
        """
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        quantized_representation = self.encoder(input_values)
        quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss = self.quantizer(quantized_representation, n_quantizers)

        loss = self.config.commitment_loss_weight * commitment_loss + self.config.codebook_loss_weight * codebook_loss

        if not return_dict:
            return (loss, quantized_representation, audio_codes, projected_latents)

        return DacEncoderOutput(loss, quantized_representation, audio_codes, projected_latents)

    @auto_docstring
    def decode(self, quantized_representation: Optional[torch.Tensor]=None, audio_codes: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None):
        """
        quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
            Quantized continuous representation of input.
        audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
            The codebook indices for each codebook, representing the quantized discrete representation of the input.
            This parameter should be provided if you want to decode directly from the audio codes (it will overwrite
            quantized_representation).
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether to return a [`DacDecoderOutput`] instead of a plain tuple.
        """
        if quantized_representation is None and audio_codes is None:
            raise ValueError('Either `quantized_representation` or `audio_codes` must be provided.')

        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if audio_codes is not None:
            quantized_representation = self.quantizer.from_codes(audio_codes)[0]

        audio_values = self.decoder(quantized_representation).squeeze(1)

        if not return_dict:
            return (audio_values,)

        return DacDecoderOutput(audio_values)

    @auto_docstring
    def forward(self, input_values: torch.Tensor, n_quantizers: Optional[int]=None, return_dict: Optional[bool]=None):
        """
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Audio data to encode.
        n_quantizers (`int`, *optional*):
            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.

        Examples:

        ```python
        >>> from datasets import load_dataset, Audio
        >>> from transformers import DacModel, AutoProcessor

        >>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

        >>> model = DacModel.from_pretrained("descript/dac_16khz")
        >>> processor = AutoProcessor.from_pretrained("descript/dac_16khz")
        >>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        >>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
        >>> inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")

        >>> encoder_outputs = model.encode(inputs["input_values"])
        >>> # Get the intermediate audio codes
        >>> audio_codes = encoder_outputs.audio_codes
        >>> # Reconstruct the audio from its quantized representation
        >>> audio_values = model.decode(encoder_outputs.quantized_representation)
        >>> # or the equivalent with a forward pass
        >>> audio_values = model(inputs["input_values"]).audio_values
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        length = input_values.shape[-1]
        loss, quantized_representation, audio_codes, projected_latents = self.encode(input_values, n_quantizers, return_dict=False)
        audio_values = self.decode(quantized_representation, return_dict=False)[0][..., :length]

        if not return_dict:
            return (loss, audio_values, quantized_representation, audio_codes, projected_latents)

        return DacOutput(loss, audio_values, quantized_representation, audio_codes, projected_latents)
@auto_docstring(custom_intro='\n    The DAC (Descript Audio Codec) model.\n    ')
class DacModel(DacPreTrainedModel):

    def __init__(self, config: DacConfig):
        pass

    @auto_docstring
    def encode(self, input_values: torch.Tensor, n_quantizers: Optional[int]=None, return_dict: Optional[bool]=None):
        '''
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Input audio data to encode.
        n_quantizers (`int`, *optional*):
            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.
        '''
        pass

    @auto_docstring
    def decode(self, quantized_representation: Optional[torch.Tensor]=None, audio_codes: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None):
        '''
        quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
            Quantized continuous representation of input.
        audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
            The codebook indices for each codebook, representing the quantized discrete representation of the input.
            This parameter should be provided if you want to decode directly from the audio codes (it will overwrite
            quantized_representation).
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether to return a [`DacDecoderOutput`] instead of a plain tuple.
        '''
        pass

    @auto_docstring
    def forward(self, input_values: torch.Tensor, n_quantizers: Optional[int]=None, return_dict: Optional[bool]=None):
        '''
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Audio data to encode.
        n_quantizers (`int`, *optional*):
            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.

        Examples:

        ```python
        >>> from datasets import load_dataset, Audio
        >>> from transformers import DacModel, AutoProcessor

        >>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

        >>> model = DacModel.from_pretrained("descript/dac_16khz")
        >>> processor = AutoProcessor.from_pretrained("descript/dac_16khz")
        >>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        >>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
        >>> inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")

        >>> encoder_outputs = model.encode(inputs["input_values"])
        >>> # Get the intermediate audio codes
        >>> audio_codes = encoder_outputs.audio_codes
        >>> # Reconstruct the audio from its quantized representation
        >>> audio_values = model.decode(encoder_outputs.quantized_representation)
        >>> # or the equivalent with a forward pass
        >>> audio_values = model(inputs["input_values"]).audio_values
        ```'''
        pass
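Two small pieces of arithmetic from `DacModel` are worth spelling out: `encode()` mixes the two quantizer losses with configurable weights, and `__init__` requires `codebook_size` to be a power of two. A sketch with assumed example values (the weights and sizes below are illustrative, not read from a checkpoint):

import math

commitment_loss_weight, codebook_loss_weight = 0.25, 1.0  # assumed example weights
commitment_loss, codebook_loss = 0.8, 0.4                 # assumed example losses
loss = commitment_loss_weight * commitment_loss + codebook_loss_weight * codebook_loss
print(f'{loss:.2f}')  # 0.25 * 0.8 + 1.0 * 0.4 = 0.60

codebook_size = 1024  # assumed; must be a power of two
bits_per_codebook = int(math.log2(codebook_size))
assert 2 ** bits_per_codebook == codebook_size  # bits_per_codebook == 10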
9
3
31
6
14
11
3
0.73
1
12
7
0
4
5
4
7
132
28
60
35
36
44
37
17
32
5
2
1
13
1,485
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacOutput
import torch.nn.functional as F
from ...utils import ModelOutput, auto_docstring
import torch.nn as nn
from typing import Optional
from dataclasses import dataclass
import torch

@dataclass
@auto_docstring
class DacOutput(ModelOutput):
    """
    loss (`torch.Tensor`):
        Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
    audio_values (`torch.Tensor` of shape `(batch_size, input_length)`):
        Reconstructed audio data.
    quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
        Quantized continuous representation of input.
    audio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`):
        Codebook indices for each codebook (quantized discrete representation of input).
    projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
        Projected latents (continuous representation of input before quantization).
    """

    loss: Optional[torch.FloatTensor] = None
    audio_values: Optional[torch.FloatTensor] = None
    quantized_representation: Optional[torch.FloatTensor] = None
    audio_codes: Optional[torch.LongTensor] = None
    projected_latents: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring
class DacOutput(ModelOutput):
    '''
    loss (`torch.Tensor`):
        Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
    audio_values (`torch.Tensor` of shape `(batch_size, input_length)`):
        Reconstructed audio data.
    quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
        Quantized continuous representation of input.
    audio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`):
        Codebook indices for each codebook (quantized discrete representation of input).
    projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
        Projected latents (continuous representation of input before quantization).
    '''
    pass
3
1
0
0
0
0
0
2.17
1
0
0
0
0
0
0
0
20
1
6
6
5
13
6
6
5
0
1
0
0
1,486
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacPreTrainedModel
from ...modeling_utils import PreTrainedAudioTokenizerBase
from .configuration_dac import DacConfig
from ...utils import ModelOutput, auto_docstring
import torch.nn as nn

@auto_docstring
class DacPreTrainedModel(PreTrainedAudioTokenizerBase):
    config: DacConfig
    base_model_prefix = 'dac'
    main_input_name = 'input_values'

    def _init_weights(self, module):
        if isinstance(module, nn.Conv1d):
            nn.init.trunc_normal_(module.weight, std=0.02)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, Snake1d):
            module.alpha.data.fill_(1.0)
        elif isinstance(module, nn.ConvTranspose1d):
            module.reset_parameters()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=0.02)

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, 'weight_norm'):
            weight_norm = nn.utils.parametrizations.weight_norm

        for layer in self.quantizer.quantizers:
            weight_norm(layer.in_proj)
            weight_norm(layer.out_proj)

        weight_norm(self.encoder.conv1)
        weight_norm(self.encoder.conv2)

        for layer in self.encoder.block:
            weight_norm(layer.conv1)
            weight_norm(layer.res_unit1.conv1)
            weight_norm(layer.res_unit1.conv2)
            weight_norm(layer.res_unit2.conv1)
            weight_norm(layer.res_unit2.conv2)
            weight_norm(layer.res_unit3.conv1)
            weight_norm(layer.res_unit3.conv2)

        weight_norm(self.decoder.conv1)
        weight_norm(self.decoder.conv2)

        for layer in self.decoder.block:
            weight_norm(layer.conv_t1)
            weight_norm(layer.res_unit1.conv1)
            weight_norm(layer.res_unit1.conv2)
            weight_norm(layer.res_unit2.conv1)
            weight_norm(layer.res_unit2.conv2)
            weight_norm(layer.res_unit3.conv1)
            weight_norm(layer.res_unit3.conv2)

    def remove_weight_norm(self):
        for layer in self.quantizer.quantizers:
            nn.utils.remove_weight_norm(layer.in_proj)
            nn.utils.remove_weight_norm(layer.out_proj)

        nn.utils.remove_weight_norm(self.encoder.conv1)
        nn.utils.remove_weight_norm(self.encoder.conv2)

        for layer in self.encoder.block:
            nn.utils.remove_weight_norm(layer.conv1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv2)
            nn.utils.remove_weight_norm(layer.res_unit2.conv1)
            nn.utils.remove_weight_norm(layer.res_unit2.conv2)
            nn.utils.remove_weight_norm(layer.res_unit3.conv1)
            nn.utils.remove_weight_norm(layer.res_unit3.conv2)

        nn.utils.remove_weight_norm(self.decoder.conv1)
        nn.utils.remove_weight_norm(self.decoder.conv2)

        for layer in self.decoder.block:
            nn.utils.remove_weight_norm(layer.conv_t1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv2)
            nn.utils.remove_weight_norm(layer.res_unit2.conv1)
            nn.utils.remove_weight_norm(layer.res_unit2.conv2)
            nn.utils.remove_weight_norm(layer.res_unit3.conv1)
            nn.utils.remove_weight_norm(layer.res_unit3.conv2)
@auto_docstring
class DacPreTrainedModel(PreTrainedAudioTokenizerBase):

    def _init_weights(self, module):
        pass

    def apply_weight_norm(self):
        pass

    def remove_weight_norm(self):
        pass
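`apply_weight_norm` prefers the newer parametrization-based API when available and falls back to the legacy one. A minimal standalone sketch of that dispatch on a single `Conv1d` (the real method walks every convolution in the model):

import torch
import torch.nn as nn

conv = nn.Conv1d(4, 4, kernel_size=3, padding=1)
if hasattr(nn.utils.parametrizations, 'weight_norm'):
    conv = nn.utils.parametrizations.weight_norm(conv)  # newer, parametrization-based API
else:
    conv = nn.utils.weight_norm(conv)  # legacy fallback
out = conv(torch.randn(1, 4, 16))
print(out.shape)  # torch.Size([1, 4, 16])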
5
0
21
3
18
0
4
0.05
1
0
0
1
3
0
3
3
75
13
59
10
55
3
59
10
55
5
1
1
11
1,487
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacResidualUnit
import torch.nn as nn

class DacResidualUnit(nn.Module):
    """
    A residual unit composed of Snake1d and weight-normalized Conv1d layers with dilations.
    """

    def __init__(self, dimension: int=16, dilation: int=1):
        super().__init__()
        pad = (7 - 1) * dilation // 2

        self.snake1 = Snake1d(dimension)
        self.conv1 = nn.Conv1d(dimension, dimension, kernel_size=7, dilation=dilation, padding=pad)
        self.snake2 = Snake1d(dimension)
        self.conv2 = nn.Conv1d(dimension, dimension, kernel_size=1)

    def forward(self, hidden_state):
        """
        Forward pass through the residual unit.

        Args:
            hidden_state (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
                Input tensor.

        Returns:
            output_tensor (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
                Input tensor after passing through the residual unit.
        """
        output_tensor = hidden_state
        output_tensor = self.conv1(self.snake1(output_tensor))
        output_tensor = self.conv2(self.snake2(output_tensor))

        padding = (hidden_state.shape[-1] - output_tensor.shape[-1]) // 2
        if padding > 0:
            hidden_state = hidden_state[..., padding:-padding]
        output_tensor = hidden_state + output_tensor
        return output_tensor
class DacResidualUnit(nn.Module):
    '''
    A residual unit composed of Snake1d and weight-normalized Conv1d layers with dilations.
    '''

    def __init__(self, dimension: int=16, dilation: int=1):
        pass

    def forward(self, hidden_state):
        '''
        Forward pass through the residual unit.

        Args:
            hidden_state (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
                Input tensor.

        Returns:
            output_tensor (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
                Input tensor after passing through the residual unit.
        '''
        pass
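With `kernel_size=7`, dilation `d` and padding `(7 - 1) * d // 2`, the dilated convolution preserves sequence length, so the residual trim in `forward` is normally a no-op. A small check (the helper name is illustrative):

def dilated_conv1d_out_len(l_in: int, kernel_size: int, dilation: int, padding: int) -> int:
    # stride-1 Conv1d length formula with dilation
    effective_kernel = dilation * (kernel_size - 1) + 1
    return l_in + 2 * padding - effective_kernel + 1

for dilation in (1, 3, 9):
    pad = (7 - 1) * dilation // 2
    assert dilated_conv1d_out_len(512, 7, dilation, pad) == 512  # length preserved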
3
2
15
2
8
5
2
0.71
1
3
1
0
2
4
2
12
35
6
17
10
14
12
17
10
14
2
1
1
3
1,488
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacResidualVectorQuantize
import torch.nn as nn
import numpy as np
from .configuration_dac import DacConfig
import torch
import torch.nn.functional as F
from typing import Optional

class DacResidualVectorQuantize(nn.Module):
    """
    ResidualVectorQuantize block - Introduced in SoundStream: An end2end neural audio codec
    (https://huggingface.co/papers/2107.03312)
    """

    def __init__(self, config: DacConfig):
        super().__init__()

        n_codebooks = config.n_codebooks
        quantizer_dropout = config.quantizer_dropout

        self.n_codebooks = n_codebooks

        self.quantizers = nn.ModuleList([DacVectorQuantize(config) for i in range(config.n_codebooks)])
        self.quantizer_dropout = quantizer_dropout

    def forward(self, hidden_state, n_quantizers: Optional[int]=None):
        """
        Quantizes the input tensor using a fixed set of codebooks and returns corresponding codebook vectors.

        Args:
            hidden_state (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Input tensor to be quantized.
            n_quantizers (`int`, *optional*):
                Number of quantizers to use. If specified and `self.quantizer_dropout` is True,
                this argument is ignored during training, and a random number of quantizers is used.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized continuous representation of input.
            audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
                Codebook indices for each codebook (quantized discrete representation of input).
            projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
                Projected latents (continuous representation of input before quantization).
            commitment_loss (`torch.Tensor` of shape `(1)`):
                Commitment loss to train the encoder to predict vectors closer to codebook entries.
            codebook_loss (`torch.Tensor` of shape `(1)`):
                Codebook loss to update the codebook.
        """
        quantized_representation = 0
        residual = hidden_state
        commitment_loss = 0
        codebook_loss = 0

        audio_codes = []
        projected_latents = []

        n_quantizers = n_quantizers if n_quantizers is not None else self.n_codebooks
        if self.training:
            n_quantizers = torch.ones((hidden_state.shape[0],)) * self.n_codebooks + 1
            dropout = torch.randint(1, self.n_codebooks + 1, (hidden_state.shape[0],))
            n_dropout = int(hidden_state.shape[0] * self.quantizer_dropout)
            n_quantizers[:n_dropout] = dropout[:n_dropout]
            n_quantizers = n_quantizers.to(hidden_state.device)

        for i, quantizer in enumerate(self.quantizers):
            if self.training is False and i >= n_quantizers:
                break

            quantized_representation_i, commitment_loss_i, codebook_loss_i, indices_i, projected_latents_i = quantizer(residual)

            mask = torch.full((hidden_state.shape[0],), fill_value=i, device=hidden_state.device) < n_quantizers
            quantized_representation = quantized_representation + quantized_representation_i * mask[:, None, None]
            residual = residual - quantized_representation_i

            commitment_loss += commitment_loss_i * mask
            codebook_loss += codebook_loss_i * mask

            audio_codes.append(indices_i)
            projected_latents.append(projected_latents_i)

        audio_codes = torch.stack(audio_codes, dim=1)
        projected_latents = torch.cat(projected_latents, dim=1)

        return (quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss)

    def from_codes(self, audio_codes: torch.Tensor):
        """
        Reconstructs the continuous representation from quantized codes.

        Args:
            audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
                Quantized discrete representation of input.

        Returns:
            quantized_representation (`torch.Tensor`):
                Quantized continuous representation of input.
            projected_latents (`torch.Tensor`):
                List of projected latents (continuous representations of input before quantization)
                for each codebook.
            audio_codes (`torch.Tensor`):
                Codebook indices for each codebook.
        """
        quantized_representation = 0.0
        projected_latents = []
        n_codebooks = audio_codes.shape[1]
        for i in range(n_codebooks):
            projected_latents_i = self.quantizers[i].codebook(audio_codes[:, i, :]).transpose(1, 2)
            projected_latents.append(projected_latents_i)
            quantized_representation += self.quantizers[i].out_proj(projected_latents_i)
        return (quantized_representation, torch.cat(projected_latents, dim=1), audio_codes)

    def from_latents(self, latents: torch.Tensor):
        """Reconstructs the quantized representation from unquantized latents.

        Args:
            latents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`):
                Continuous representation of input after projection.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized representation of the full-projected space.
            quantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized representation of the latent space (continuous representation before quantization).
        """
        quantized_representation = 0
        quantized_latents = []
        codes = []
        codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers])
        dims = torch.cumsum(codebook_dims_tensor, dim=0)

        n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0]
        for i in range(n_codebooks):
            hidden_dim_j, hidden_dim_k = (dims[i], dims[i + 1])
            quantized_latents_i, codes_i = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :])
            quantized_latents.append(quantized_latents_i)
            codes.append(codes_i)

            quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i)
            quantized_representation = quantized_representation + quantized_representation_i

        return (quantized_representation, torch.cat(quantized_latents, dim=1))
class DacResidualVectorQuantize(nn.Module):
    '''
    ResidualVectorQuantize block - Introduced in SoundStream: An end2end neural audio codec
    (https://huggingface.co/papers/2107.03312)
    '''

    def __init__(self, config: DacConfig):
        pass

    def forward(self, hidden_state, n_quantizers: Optional[int]=None):
        '''
        Quantizes the input tensor using a fixed set of codebooks and returns corresponding codebook vectors.

        Args:
            hidden_state (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Input tensor to be quantized.
            n_quantizers (`int`, *optional*):
                Number of quantizers to use. If specified and `self.quantizer_dropout` is True,
                this argument is ignored during training, and a random number of quantizers is used.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized continuous representation of input.
            audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
                Codebook indices for each codebook (quantized discrete representation of input).
            projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
                Projected latents (continuous representation of input before quantization).
            commitment_loss (`torch.Tensor` of shape `(1)`):
                Commitment loss to train the encoder to predict vectors closer to codebook entries.
            codebook_loss (`torch.Tensor` of shape `(1)`):
                Codebook loss to update the codebook.
        '''
        pass

    def from_codes(self, audio_codes: torch.Tensor):
        '''
        Reconstructs the continuous representation from quantized codes.

        Args:
            audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
                Quantized discrete representation of input.

        Returns:
            quantized_representation (`torch.Tensor`):
                Quantized continuous representation of input.
            projected_latents (`torch.Tensor`):
                List of projected latents (continuous representations of input before quantization)
                for each codebook.
            audio_codes (`torch.Tensor`):
                Codebook indices for each codebook.
        '''
        pass

    def from_latents(self, latents: torch.Tensor):
        '''Reconstructs the quantized representation from unquantized latents.

        Args:
            latents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`):
                Continuous representation of input after projection.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized representation of the full-projected space.
            quantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized representation of the latent space (continuous representation before quantization).
        '''
        pass
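The forward loop implements residual quantization: each stage quantizes whatever the previous stages failed to capture. A toy numpy sketch of that idea with random stand-in codebooks (nothing here is taken from the learned model):

import numpy as np

rng = np.random.default_rng(0)
codebooks = [rng.normal(size=(16, 4)) for _ in range(3)]  # 3 stages, 16 codes of dim 4

x = rng.normal(size=4)
residual, reconstruction, codes = x.copy(), np.zeros(4), []
for codebook in codebooks:
    idx = int(((codebook - residual) ** 2).sum(axis=1).argmin())  # nearest code to the residual
    codes.append(idx)
    reconstruction += codebook[idx]
    residual -= codebook[idx]

print(codes, float(np.linalg.norm(x - reconstruction)))  # one index per stage, remaining error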
5
4
32
5
15
12
3
0.79
1
7
2
0
4
3
4
14
136
25
62
36
57
49
60
36
55
5
1
2
10
1,489
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.DacVectorQuantize
import torch.nn as nn
from .configuration_dac import DacConfig
import torch.nn.functional as F

class DacVectorQuantize(nn.Module):
    """
    Implementation of VQ similar to Karpathy's repo (https://github.com/karpathy/deep-vector-quantization)

    Additionally uses following tricks from improved VQGAN (https://huggingface.co/papers/2110.04627):
        1. Factorized codes: Perform nearest neighbor lookup in low-dimensional space
           for improved codebook usage
        2. l2-normalized codes: Converts euclidean distance to cosine similarity which
           improves training stability
    """

    def __init__(self, config: DacConfig):
        super().__init__()
        self.codebook_dim = config.codebook_dim
        self.in_proj = nn.Conv1d(config.hidden_size, config.codebook_dim, kernel_size=1)
        self.out_proj = nn.Conv1d(config.codebook_dim, config.hidden_size, kernel_size=1)
        self.codebook = nn.Embedding(config.codebook_size, config.codebook_dim)

    def forward(self, hidden_state):
        """
        Quantizes the input tensor using a fixed codebook and returns the corresponding codebook vectors.

        Args:
            hidden_state (`torch.FloatTensor` of shape `(batch_size, dimension, time_steps)`):
                Input tensor.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized continuous representation of input.
            commitment_loss (`torch.FloatTensor` of shape `(1)`):
                Commitment loss to train encoder to predict vectors closer to codebook entries.
            codebook_loss (`torch.FloatTensor` of shape `(1)`):
                Codebook loss to update the codebook.
            audio_codes (`torch.LongTensor` of shape `(batch_size, time_steps)`):
                Codebook indices for each codebook, quantized discrete representation of input.
            projected_latents (`torch.FloatTensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
                Projected latents (continuous representation of input before quantization).
        """
        projected_latents = self.in_proj(hidden_state)
        quantized_representation, audio_codes = self.decode_latents(projected_latents)

        commitment_loss = F.mse_loss(projected_latents, quantized_representation.detach(), reduction='mean')
        codebook_loss = F.mse_loss(quantized_representation, projected_latents.detach(), reduction='mean')

        quantized_representation = projected_latents + (quantized_representation - projected_latents).detach()
        quantized_representation = self.out_proj(quantized_representation)

        return (quantized_representation, commitment_loss, codebook_loss, audio_codes, projected_latents)

    def decode_latents(self, hidden_states):
        batch_size, hidden_dim, sequence_length = hidden_states.shape
        encodings = hidden_states.permute(0, 2, 1).reshape(batch_size * sequence_length, hidden_dim)
        codebook = self.codebook.weight

        encodings = F.normalize(encodings)
        codebook = F.normalize(codebook)

        l2_norm = encodings.pow(2).sum(1, keepdim=True)
        dist = -(l2_norm - 2 * encodings @ codebook.t()) + codebook.pow(2).sum(1, keepdim=True).t()
        indices = dist.max(1)[1]
        indices = indices.reshape(hidden_states.size(0), -1)
        quantized_representation = self.codebook(indices).transpose(1, 2)
        return (quantized_representation, indices)
class DacVectorQuantize(nn.Module):
    '''
    Implementation of VQ similar to Karpathy's repo (https://github.com/karpathy/deep-vector-quantization)

    Additionally uses following tricks from improved VQGAN (https://huggingface.co/papers/2110.04627):
        1. Factorized codes: Perform nearest neighbor lookup in low-dimensional space
           for improved codebook usage
        2. l2-normalized codes: Converts euclidean distance to cosine similarity which
           improves training stability
    '''

    def __init__(self, config: DacConfig):
        pass

    def forward(self, hidden_state):
        '''
        Quantizes the input tensor using a fixed codebook and returns the corresponding codebook vectors.

        Args:
            hidden_state (`torch.FloatTensor` of shape `(batch_size, dimension, time_steps)`):
                Input tensor.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized continuous representation of input.
            commitment_loss (`torch.FloatTensor` of shape `(1)`):
                Commitment loss to train encoder to predict vectors closer to codebook entries.
            codebook_loss (`torch.FloatTensor` of shape `(1)`):
                Codebook loss to update the codebook.
            audio_codes (`torch.LongTensor` of shape `(batch_size, time_steps)`):
                Codebook indices for each codebook, quantized discrete representation of input.
            projected_latents (`torch.FloatTensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
                Projected latents (continuous representation of input before quantization).
        '''
        pass

    def decode_latents(self, hidden_states):
        pass
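`decode_latents` l2-normalizes both encodings and codebook, so minimizing euclidean distance coincides with maximizing cosine similarity; that is why taking `dist.max(1)` on the negated-distance expression finds the nearest code. A small torch check of that equivalence (the shapes below are illustrative):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
encodings = F.normalize(torch.randn(5, 8))   # (batch * time, codebook_dim), unit norm
codebook = F.normalize(torch.randn(32, 8))   # (codebook_size, codebook_dim), unit norm

l2_norm = encodings.pow(2).sum(1, keepdim=True)
dist = -(l2_norm - 2 * encodings @ codebook.t()) + codebook.pow(2).sum(1, keepdim=True).t()
indices = dist.max(1)[1]

# With unit-norm rows, dist equals 2 * cosine similarity, so the same neighbours win:
assert torch.equal(indices, (encodings @ codebook.t()).argmax(1))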
4
2
18
3
8
7
1
1.15
1
2
1
0
3
3
3
13
68
13
26
18
22
30
26
18
22
1
1
0
3
1,490
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/dac/modeling_dac.py
transformers.models.dac.modeling_dac.Snake1d
import torch.nn as nn
import torch
import torch.nn.functional as F

class Snake1d(nn.Module):
    """
    A 1-dimensional Snake activation function module.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, hidden_dim, 1))

    def forward(self, hidden_states):
        shape = hidden_states.shape
        hidden_states = hidden_states.reshape(shape[0], shape[1], -1)
        hidden_states = hidden_states + (self.alpha + 1e-09).reciprocal() * torch.sin(self.alpha * hidden_states).pow(2)
        hidden_states = hidden_states.reshape(shape)
        return hidden_states
class Snake1d(nn.Module):
    '''
    A 1-dimensional Snake activation function module.
    '''

    def __init__(self, hidden_dim):
        pass

    def forward(self, hidden_states):
        pass
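The forward pass computes the Snake activation `snake(x) = x + (1/alpha) * sin(alpha * x)**2` (the `1e-09` only guards the reciprocal); its derivative is `1 + sin(2 * alpha * x) >= 0`, so the function is nondecreasing. A minimal numeric sketch:

import torch

alpha = torch.tensor(1.0)
x = torch.linspace(-3, 3, 7)
snake = x + (alpha + 1e-09).reciprocal() * torch.sin(alpha * x).pow(2)
print(snake)  # stays close to the identity, with periodic bumps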
3
1
5
0
5
0
1
0.3
1
1
0
0
2
1
2
12
15
2
10
5
7
3
10
5
7
1
1
0
2
1,491
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/configuration_data2vec_audio.py
transformers.models.data2vec.configuration_data2vec_audio.Data2VecAudioConfig
import math
from ...configuration_utils import PretrainedConfig

class Data2VecAudioConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Data2VecAudioModel`]. It is used to instantiate
    a Data2VecAudio model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Data2VecAudio
    [facebook/data2vec-audio-base-960h](https://huggingface.co/facebook/data2vec-audio-base-960h) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Data2VecAudio model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`Data2VecAudioModel`] or [`TFData2VecAudioModel`]. Vocabulary size
            of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the
            forward method of [`Data2VecAudioModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Data2VecAudioForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more
            details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of 1D convolutional positional embeddings layer.
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease
            the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespectively of `mask_time_prob`. Only relevant if
            ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks''
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks
            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
            overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment
            is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`Data2VecAudioForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`Data2VecAudioForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`Data2VecAudioForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
        tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
            A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
            *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
        xvector_output_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        add_adapter (`bool`, *optional*, defaults to `False`):
            Whether a convolutional network should be stacked on top of the Data2VecAudio Encoder. Can be very useful
            for warm-starting Data2VecAudio for SpeechEncoderDecoder models.
        adapter_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        adapter_stride (`int`, *optional*, defaults to 2):
            Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        num_adapter_layers (`int`, *optional*, defaults to 3):
            Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter
            is True`.
        output_hidden_size (`int`, *optional*):
            Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
            if `add_adapter is True`.

    Example:

    ```python
    >>> from transformers import Data2VecAudioConfig, Data2VecAudioModel

    >>> # Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration
    >>> configuration = Data2VecAudioConfig()

    >>> # Initializing a model (with random weights) from the facebook/data2vec-audio-base-960h style configuration
    >>> model = Data2VecAudioModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'data2vec-audio'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction='sum', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers:
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is'
                f' `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        self.classifier_proj_size = classifier_proj_size

        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
null
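The `inputs_to_logits_ratio` property at the end of the record above is just the product of the convolutional strides, i.e. how many waveform samples collapse into one encoder frame. With the default `conv_stride`:

import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default from the signature above
print(math.prod(conv_stride))  # 5 * 2**6 = 320 input samples per logit frame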
4
1
58
3
52
3
2
1.27
1
3
0
0
2
42
2
2
260
17
107
94
56
136
50
46
47
2
1
1
3
1,492
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/configuration_data2vec_text.py
transformers.models.data2vec.configuration_data2vec_text.Data2VecTextConfig
from ...configuration_utils import PretrainedConfig

class Data2VecTextConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Data2VecTextModel`]. It is used to instantiate
    a Data2VecText model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Data2VecText
    [facebook/data2vec-text-base](https://huggingface.co/facebook/data2vec-text-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the DATA2VEC model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Data2VecModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`Data2VecModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import Data2VecTextConfig, Data2VecTextModel

    >>> # Initializing a Data2VecText facebook/data2vec-text-base style configuration
    >>> configuration = Data2VecTextConfig()

    >>> # Initializing a model (with random weights) from the facebook/data2vec-text-base style configuration
    >>> model = Data2VecTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'data2vec-text'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`Data2VecTextModel`]. It is used to instantiate
    a Data2VecText model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Data2VecText
    [facebook/data2vec-text-base](https://huggingface.co/facebook/data2vec-text-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the DATA2VEC model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Data2VecModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`Data2VecModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
Examples: ```python >>> from transformers import Data2VecTextConfig, Data2VecTextModel >>> # Initializing a Data2VecText facebook/data2vec-text-base style configuration >>> configuration = Data2VecTextConfig() >>> # Initializing a model (with random weights) from the facebook/data2vec-text-base style configuration >>> model = Data2VecTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs): pass
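Beyond the doctest embedded in the docstring above, here is a minimal usage sketch showing how such a configuration is typically customized before building the model. The specific values (smaller vocabulary, fewer layers) are illustrative assumptions, not defaults taken from this record.

```python
from transformers import Data2VecTextConfig, Data2VecTextModel

# Hypothetical, smaller-than-default settings chosen only for illustration
config = Data2VecTextConfig(
    vocab_size=8000,
    hidden_size=256,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=1024,
)

model = Data2VecTextModel(config)  # randomly initialized weights
print(model.config.num_hidden_layers)  # 4
```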
2
1
39
1
38
0
1
1.45
1
1
0
0
1
15
1
1
109
11
40
39
17
58
19
18
17
1
1
0
1
1,493
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/configuration_data2vec_text.py
transformers.models.data2vec.configuration_data2vec_text.Data2VecTextOnnxConfig
from collections import OrderedDict
from collections.abc import Mapping

from ...onnx import OnnxConfig


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass
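A small standalone sketch of the axis-selection logic in the `inputs` property above: multiple-choice inputs carry an extra `choice` axis between the batch and sequence dimensions. The helper name `dynamic_onnx_inputs` is made up for illustration and is not part of the library.

```python
from collections import OrderedDict

def dynamic_onnx_inputs(task: str) -> OrderedDict:
    # Mirrors the branch in Data2VecTextOnnxConfig.inputs
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(dynamic_onnx_inputs("multiple-choice")["input_ids"])
# {0: 'batch', 1: 'choice', 2: 'sequence'}
```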
3
0
11
0
11
0
2
0
1
3
0
0
1
0
1
1
13
0
13
4
10
0
6
3
4
2
1
1
2
1,494
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/configuration_data2vec_vision.py
transformers.models.data2vec.configuration_data2vec_vision.Data2VecVisionConfig
from ...configuration_utils import PretrainedConfig class Data2VecVisionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`Data2VecVisionModel`]. It is used to instantiate an Data2VecVision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecVision [facebook/data2vec-vision-base](https://huggingface.co/facebook/data2vec-vision-base) architecture. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to use BERT-style absolute position embeddings. use_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use T5-style relative position embeddings in the self-attention layers. use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use the same relative position embeddings across all self-attention layers of the Transformer. layer_scale_init_value (`float`, *optional*, defaults to 0.1): Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate per sample (when applied in the main path of residual layers). use_mean_pooling (`bool`, *optional*, defaults to `True`): Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the CLS token, before applying the classification head. out_indices (`list[int]`, *optional*, defaults to `[3, 5, 7, 11]`): Indices of the feature maps to use for semantic segmentation. pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. 
auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. Example: ```python >>> from transformers import Data2VecVisionConfig, Data2VecVisionModel >>> # Initializing a Data2VecVision data2vec_vision-base-patch16-224-in22k style configuration >>> configuration = Data2VecVisionConfig() >>> # Initializing a model (with random weights) from the data2vec_vision-base-patch16-224-in22k style configuration >>> model = Data2VecVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'data2vec-vision' def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.use_mask_token = use_mask_token self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_bias = use_relative_position_bias self.use_shared_relative_position_bias = use_shared_relative_position_bias self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.use_mean_pooling = use_mean_pooling self.out_indices = out_indices self.pool_scales = pool_scales self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`Data2VecVisionModel`]. It is used to instantiate an Data2VecVision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecVision [facebook/data2vec-vision-base](https://huggingface.co/facebook/data2vec-vision-base) architecture. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to use BERT-style absolute position embeddings. use_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use T5-style relative position embeddings in the self-attention layers. use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use the same relative position embeddings across all self-attention layers of the Transformer. layer_scale_init_value (`float`, *optional*, defaults to 0.1): Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate per sample (when applied in the main path of residual layers). use_mean_pooling (`bool`, *optional*, defaults to `True`): Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the CLS token, before applying the classification head. out_indices (`list[int]`, *optional*, defaults to `[3, 5, 7, 11]`): Indices of the feature maps to use for semantic segmentation. pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. 
auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. Example: ```python >>> from transformers import Data2VecVisionConfig, Data2VecVisionModel >>> # Initializing a Data2VecVision data2vec_vision-base-patch16-224-in22k style configuration >>> configuration = Data2VecVisionConfig() >>> # Initializing a model (with random weights) from the data2vec_vision-base-patch16-224-in22k style configuration >>> model = Data2VecVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs): pass
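In addition to the docstring example above, here is a hedged usage sketch for segmentation-oriented settings; the chosen values are illustrative assumptions rather than values read from this record.

```python
from transformers import Data2VecVisionConfig, Data2VecVisionModel

# Illustrative (not default) settings: keep the base architecture but expose
# intermediate feature maps, as one would for a semantic-segmentation head.
config = Data2VecVisionConfig(
    image_size=384,
    out_indices=[3, 5, 7, 11],
    use_auxiliary_head=True,
    auxiliary_loss_weight=0.4,
)

model = Data2VecVisionModel(config)  # randomly initialized weights
print(config.patch_size, config.num_channels)  # 16 3
```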
2
1
63
2
59
2
1
1.21
1
1
0
0
1
27
1
1
145
10
61
60
29
74
31
30
29
1
1
0
1
1,495
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/configuration_data2vec_vision.py
transformers.models.data2vec.configuration_data2vec_vision.Data2VecVisionOnnxConfig
from collections import OrderedDict
from collections.abc import Mapping

from packaging import version

from ...onnx import OnnxConfig


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        return 0.0001
class Data2VecVisionOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    @property
    def atol_for_validation(self) -> float:
        pass
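To make the `atol_for_validation` value above concrete, a small sketch of the kind of elementwise comparison an ONNX export validation performs; the tensors here are fabricated for illustration.

```python
import numpy as np

# The exported ONNX graph and the PyTorch model are compared elementwise;
# a tolerance of 1e-4 (the atol_for_validation above) accepts tiny numerical drift.
reference = np.array([0.123456, -0.654321])   # e.g. PyTorch output (made up)
exported = reference + 5e-5                   # e.g. ONNX Runtime output (made up)
print(np.allclose(reference, exported, atol=1e-4))  # True
```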
5
0
4
0
4
0
1
0
1
4
0
0
2
0
2
2
14
2
12
6
7
0
6
4
3
1
1
0
2
1,496
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioAdapter
import numpy as np
from torch import nn


class Data2VecAudioAdapter(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Project down to `output_hidden_size` only if it differs from the encoder hidden size
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None
        self.layers = nn.ModuleList((Data2VecAudioAdapterLayer(config) for _ in range(config.num_adapter_layers)))
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        # Optional down-projection before the adapter convolutions
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj(hidden_states)
            hidden_states = self.proj_layer_norm(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        for layer in self.layers:
            # LayerDrop: randomly skip adapter layers during training
            layerdrop_prob = np.random.random()
            if not self.training or layerdrop_prob > self.layerdrop:
                hidden_states = layer(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
class Data2VecAudioAdapter(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
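A minimal sketch of the LayerDrop behaviour used in the adapter's forward pass above: during training each layer is skipped with probability `layerdrop`, while in evaluation mode every layer always runs. The module below is a stand-in for illustration, not the real adapter.

```python
import numpy as np
import torch
from torch import nn

class TinyLayerDropStack(nn.Module):
    # Stand-in for the adapter: a stack of layers with per-layer stochastic skipping.
    def __init__(self, num_layers=3, dim=8, layerdrop=0.5):
        super().__init__()
        self.layers = nn.ModuleList(nn.Linear(dim, dim) for _ in range(num_layers))
        self.layerdrop = layerdrop

    def forward(self, x):
        for layer in self.layers:
            # Same rule as Data2VecAudioAdapter.forward: skip only when training
            if not self.training or np.random.random() > self.layerdrop:
                x = layer(x)
        return x

stack = TinyLayerDropStack()
stack.eval()
_ = stack(torch.randn(2, 8))  # in eval mode all three layers are applied
```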
3
0
14
3
10
1
3
0.1
1
3
1
0
2
4
2
12
29
6
21
9
18
2
20
9
17
4
1
2
6
1,497
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioAdapterLayer
from torch import nn


class Data2VecAudioAdapterLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        # The convolution outputs 2x channels so that the GLU in `forward` can halve them again
        self.conv = nn.Conv1d(
            config.output_hidden_size,
            2 * config.output_hidden_size,
            config.adapter_kernel_size,
            stride=config.adapter_stride,
            padding=1,
        )

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = nn.functional.glu(hidden_states, dim=1)
        return hidden_states
class Data2VecAudioAdapterLayer(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
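The reason the convolution above doubles the channel count is the gated linear unit applied in `forward`: `glu` splits its input in half along `dim=1` and multiplies one half by the sigmoid of the other, restoring the original channel dimension. A shape-only sketch (tensor sizes are made up):

```python
import torch
from torch import nn

hidden = 16                                   # stands in for config.output_hidden_size
conv = nn.Conv1d(hidden, 2 * hidden, kernel_size=3, stride=2, padding=1)

x = torch.randn(1, hidden, 50)                # (batch, channels, time)
y = conv(x)                                   # channels doubled: (1, 32, 25)
z = nn.functional.glu(y, dim=1)               # GLU halves them again: (1, 16, 25)
print(y.shape, z.shape)
```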
3
0
7
1
7
0
1
0
1
1
0
0
2
1
2
12
16
2
14
4
11
0
8
4
5
1
1
0
2
1,498
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioAttention
from ...modeling_flash_attention_utils import FlashAttentionKwargs
import torch
from .configuration_data2vec_audio import Data2VecAudioConfig
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn


class Data2VecAudioAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[Data2VecAudioConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).'
            )
        self.scaling = self.head_dim ** (-0.5)
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        is_cross_attention = key_value_states is not None

        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)

        # Project and split into per-head views of size head_dim
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
        current_states = key_value_states if is_cross_attention else hidden_states
        key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)

        # `eager_attention_forward` is a module-level helper defined elsewhere in this file
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            head_mask=layer_head_mask,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights, None)
class Data2VecAudioAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[Data2VecAudioConfig]=None):
        pass

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
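A reduced sketch of the tensor bookkeeping in the forward pass above: hidden states are projected, reshaped into per-head views of size `head_dim`, and attention is scaled by `head_dim ** -0.5`. Plain `scaled_dot_product_attention` is used here as a stand-in for the configurable attention interface in the record; the sizes are made up.

```python
import torch
from torch import nn
import torch.nn.functional as F

bsz, tgt_len, embed_dim, num_heads = 2, 10, 64, 4
head_dim = embed_dim // num_heads   # 16; must divide evenly, as the ValueError above enforces

q_proj = nn.Linear(embed_dim, embed_dim)
k_proj = nn.Linear(embed_dim, embed_dim)
v_proj = nn.Linear(embed_dim, embed_dim)

hidden_states = torch.randn(bsz, tgt_len, embed_dim)
# (batch, time, embed) -> (batch, heads, time, head_dim), mirroring the view/transpose above
q = q_proj(hidden_states).view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
k = k_proj(hidden_states).view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
v = v_proj(hidden_states).view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)

# Default scaling is 1/sqrt(head_dim), i.e. the same value as self.scaling above
attn = F.scaled_dot_product_attention(q, k, v)
attn = attn.transpose(1, 2).reshape(bsz, tgt_len, embed_dim)  # merge heads back
print(attn.shape)  # torch.Size([2, 10, 64])
```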
3
2
50
7
35
8
5
0.24
1
7
1
2
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
1,499
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioConvLayer
from ...activations import ACT2FN
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer


class Data2VecAudioConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        # LayerNorm acts on the channel dimension, so transpose to (batch, time, channels) and back
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class Data2VecAudioConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
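Each convolution above shortens the time axis according to its kernel and stride, so the frame count of the feature extractor can be computed in closed form: `L_out = floor((L_in - kernel) / stride) + 1` per layer (no padding). A small sketch using a wav2vec2-style kernel/stride schedule, which is an assumption here rather than a value read from this record:

```python
def conv_output_length(input_length: int, kernel: int, stride: int) -> int:
    # Standard Conv1d length formula with no padding and dilation 1
    return (input_length - kernel) // stride + 1

# Assumed wav2vec2-style feature-extractor schedule (illustrative, not taken from the record)
kernels = [10, 3, 3, 3, 3, 2, 2]
strides = [5, 2, 2, 2, 2, 2, 2]

length = 16000  # one second of 16 kHz audio
for k, s in zip(kernels, strides):
    length = conv_output_length(length, k, s)
print(length)  # 49 frames -> roughly one frame every 20 ms
```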
3
0
12
2
10
0
2
0
1
1
0
0
2
5
2
12
25
4
21
8
18
0
15
8
12
2
1
0
3