Column schema (35 columns). For string columns, Min and Max give the range of string lengths; for numeric columns, the range of values.

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
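The schema can also be inspected programmatically. Below is a minimal sketch using the `datasets` library; the repo id `org/class-skeletons` is a placeholder, since this preview does not name the dataset:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the dataset's actual name on the Hub.
ds = load_dataset("org/class-skeletons", split="train")

# The column schema summarized in the table above.
print(ds.features)

# One record: identifying fields plus the Understand-style metric columns.
row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["CountLineCode"], row["CommentToCodeRatio"])
```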
---

**id:** 1,800
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py`
**class_name:** `transformers.models.deprecated.mctct.modeling_mctct.MCTCTOutput`

**human_written_code:**

```python
from torch import nn


class MCTCTOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
```

**class_skeleton:**

```python
class MCTCTOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states, input_tensor):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=12, CountLineBlank=1, CountLineCode=11, CountLineCodeDecl=6, CountLineCodeExe=8, CountLineComment=0, CountStmt=11, CountStmtDecl=6, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
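Each record pairs `human_written_code` with a `class_skeleton` in which method bodies are reduced to `pass` while signatures, decorators, and docstrings survive. One plausible way to derive such a skeleton — an illustration, not necessarily how this dataset was actually built — is Python's `ast` module:

```python
import ast


def skeletonize(source: str, class_name: str) -> str:
    """Reduce a class to signatures, decorators, and docstrings;
    method bodies become `pass` and class attributes are dropped."""
    module = ast.parse(source)
    for node in ast.walk(module):
        if not (isinstance(node, ast.ClassDef) and node.name == class_name):
            continue
        # Keep only the class docstring and method definitions at class level.
        node.body = [
            stmt
            for stmt in node.body
            if isinstance(stmt, (ast.FunctionDef, ast.AsyncFunctionDef))
            or (isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Constant))
        ] or [ast.Pass()]
        for fn in ast.walk(node):
            if isinstance(fn, (ast.FunctionDef, ast.AsyncFunctionDef)):
                # Keep the docstring expression (if any); replace the rest with `pass`.
                keep = [fn.body[0]] if ast.get_docstring(fn) is not None else []
                fn.body = keep + [ast.Pass()]
        return ast.unparse(node)
    raise ValueError(f'class {class_name!r} not found')


# Applied to the record above, this yields roughly the class_skeleton shown
# (modulo quoting and line-wrapping details).
```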
---

**id:** 1,801
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py`
**class_name:** `transformers.models.deprecated.mctct.modeling_mctct.MCTCTPreTrainedModel`

**human_written_code:**

```python
from .configuration_mctct import MCTCTConfig
from ....modeling_utils import PreTrainedModel
from torch import nn
import torch


class MCTCTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config: MCTCTConfig
    base_model_prefix = 'mctct'
    main_input_name = 'input_features'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, MCTCTLayerNorm):
            module.singleton_weight.data.fill_(1.0)
            module.singleton_bias.data.zero_()
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        dilation = 1
        for _, kernel_sz, stride in zip(
            range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride
        ):
            padding = kernel_sz // 2
            input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1
            input_lengths = torch.div(input_lengths, stride, rounding_mode='trunc') + 1
        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]
        subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
        bsz = attention_mask.size()[0]
        attention_mask = torch.zeros(
            (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        attention_mask[torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
        return attention_mask
```

**class_skeleton:**

```python
class MCTCTPreTrainedModel(PreTrainedModel):
    '''
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    '''

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        '''
        Computes the output length of the convolutional layers
        '''
        pass

    def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
        pass
```

**Metrics:** total_program_units=4, total_doc_str=3, AvgCountLine=18, AvgCountLineBlank=1, AvgCountLineCode=13, AvgCountLineComment=4, AvgCyclomatic=4, CommentToCodeRatio=0.33, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=3, CountDeclInstanceMethod=3, CountDeclInstanceVariable=0, CountDeclMethod=3, CountDeclMethodAll=132, CountLine=67, CountLineBlank=7, CountLineCode=45, CountLineCodeDecl=14, CountLineCodeExe=41, CountLineComment=15, CountStmt=38, CountStmtDecl=14, CountStmtExe=34, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=13
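The count columns look internally consistent: for this record, 15 comment lines against 45 code lines reproduces the stated CommentToCodeRatio of 0.33, assuming the ratio is defined as CountLineComment / CountLineCode (an inference from the numbers, not a documented formula):

```python
# Assumed relation: CommentToCodeRatio = CountLineComment / CountLineCode.
count_line_comment, count_line_code = 15, 45
assert round(count_line_comment / count_line_code, 2) == 0.33
```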
---

**id:** 1,802
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py`
**class_name:** `transformers.models.deprecated.mctct.modeling_mctct.MCTCTSelfAttention`

**human_written_code:**

```python
import torch
import math
from torch import nn


class MCTCTSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(
                f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.attention_head_dim
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.max_position_embeddings = config.max_position_embeddings
        self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def reshape_fortran(self, x, shape):
        if len(x.shape) > 0:
            x = x.permute(*reversed(range(len(x.shape))))
        return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))

    def relative_position_embedding_rotate(self, scores):
        scores = scores.permute(0, 2, 3, 1)
        batch, hidden_state, seq_len, heads = scores.shape
        scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1)
        scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads])
        scores = scores[:, :(seq_len + hidden_state - 1) * seq_len]
        scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads])
        halfpoint = hidden_state // 2
        scores = scores[:, halfpoint:halfpoint + seq_len].transpose(1, 2)
        return scores.permute(0, 3, 1, 2)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        mixed_query_layer = self.query(hidden_states)
        mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size)
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        positional_embedding = self.distance_embedding.weight
        relative_position_scores = torch.einsum('lh, bche -> bcle', positional_embedding, query_layer.transpose(2, 3))
        relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores)
        attention_scores = attention_scores + relative_position_scores
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
```

**class_skeleton:**

```python
class MCTCTSelfAttention(nn.Module):
    def __init__(self, config):
        pass

    def transpose_for_scores(self, x):
        pass

    def reshape_fortran(self, x, shape):
        pass

    def relative_position_embedding_rotate(self, scores):
        pass

    def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        pass
```

**Metrics:** total_program_units=6, total_doc_str=0, AvgCountLine=20, AvgCountLineBlank=5, AvgCountLineCode=12, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.25, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=10, CountDeclMethod=5, CountDeclMethodAll=15, CountLine=106, CountLineBlank=29, CountLineCode=63, CountLineCodeDecl=35, CountLineCodeExe=51, CountLineComment=16, CountStmt=54, CountStmtDecl=29, CountStmtExe=48, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=10
---

**id:** 1,803
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py`
**class_name:** `transformers.models.deprecated.mctct.modeling_mctct.MCTCTSelfOutput`

**human_written_code:**

```python
from torch import nn


class MCTCTSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
```

**class_skeleton:**

```python
class MCTCTSelfOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states, input_tensor):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=7, CountLineCodeExe=9, CountLineComment=0, CountStmt=12, CountStmtDecl=7, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
---

**id:** 1,804
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/processing_mctct.py`
**class_name:** `transformers.models.deprecated.mctct.processing_mctct.MCTCTProcessor`

**human_written_code:**

```python
from contextlib import contextmanager
import warnings
from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    """
    Constructs a MCTCT processor which wraps a MCTCT feature extractor and a MCTCT tokenizer into a single processor.

    [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the
    [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.

    Args:
        feature_extractor (`MCTCTFeatureExtractor`):
            An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`AutoTokenizer`]. The tokenizer is a required input.
    """

    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
        [`~MCTCTFeatureExtractor.__call__`] and returns its output. If used in the context
        [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to AutoTokenizer's
        [`~AutoTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if 'raw_speech' in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def pad(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
        [`~MCTCTFeatureExtractor.pad`] and returns its output. If used in the context
        [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
        [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
        """
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
        MCTCT.
        """
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as your '
            'audio inputs, or in a separate call.'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
```

**class_skeleton:**

```python
class MCTCTProcessor(ProcessorMixin):
    '''
    Constructs a MCTCT processor which wraps a MCTCT feature extractor and a MCTCT tokenizer into a single processor.

    [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the
    [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.

    Args:
        feature_extractor (`MCTCTFeatureExtractor`):
            An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`AutoTokenizer`]. The tokenizer is a required input.
    '''

    def __init__(self, feature_extractor, tokenizer):
        pass

    def __call__(self, *args, **kwargs):
        '''
        When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
        [`~MCTCTFeatureExtractor.__call__`] and returns its output. If used in the context
        [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to AutoTokenizer's
        [`~AutoTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
        '''
        pass

    def pad(self, *args, **kwargs):
        '''
        When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
        [`~MCTCTFeatureExtractor.pad`] and returns its output. If used in the context
        [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
        [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
        '''
        pass

    def decode(self, *args, **kwargs):
        '''
        This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        '''
        pass

    @contextmanager
    def as_target_processor(self):
        '''
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
        MCTCT.
        '''
        pass
```

**Metrics:** total_program_units=7, total_doc_str=5, AvgCountLine=16, AvgCountLineBlank=1, AvgCountLineCode=11, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.51, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=6, CountDeclInstanceVariable=2, CountDeclMethod=6, CountDeclMethodAll=23, CountLine=119, CountLineBlank=16, CountLineCode=68, CountLineCodeDecl=19, CountLineCodeExe=60, CountLineComment=35, CountStmt=58, CountStmtDecl=18, CountStmtExe=51, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=20
---

**id:** 1,805
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/configuration_mega.py`
**class_name:** `transformers.models.deprecated.mega.configuration_mega.MegaConfig`

**human_written_code:**

````python
from ....configuration_utils import PretrainedConfig


class MegaConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Mega
    [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MegaModel`].
        hidden_size (`int`, *optional*, defaults to 128):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Mega encoder.
        intermediate_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden size (self-attention value projection) within the Mega encoder
        ema_projection_size (`int`, *optional*, defaults to 16):
            Dimensionality of the MegaMultiDimensionDampedEma
        bidirectional (`bool`, *optional*, defaults to `True`):
            Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`) or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be False if you intend to use the model as a decoder.
        shared_representation_size (`int`, *optional*, defaults to 64):
            Dimensionality of the linear projection for shared representation of self-attention queries and keys
        use_chunking (`bool`, *optional*, defaults to `False`):
            Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)
        chunk_size (`int`, *optional*, defaults to -1):
            If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If chunking is used, input sequences must be padded to a multiple of `chunk_size`
        truncation (`int`, *optional*):
            If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma
        normalize_before_mega (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks
        normalization_type (`str`, *optional*, defaults to `"scalenorm"`):
            Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`, `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm)
        norm_affine (`bool`, *optional*, defaults to `True`):
            If `True`, applies a parameterized affine transformation to inputs during normalization
        activation (`str`, *optional*, defaults to `"silu"`):
            Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`, `"gelu"`, or `"gelu_accurate"`
        attention_activation (`str`, *optional*, defaults to `"softmax"`):
            Activation function to apply for single-headed self-attention (a la Transformer). Choose one of `"softmax"`, `"laplace"`, or `"relu2"`
        dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for EMA self-attention
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        use_feature_dropout (`bool`, *optional*, defaults to `False`):
            Whether to use feature-based (`True`) or standard dropout (`False`)
        use_normalized_ffn (`bool`, *optional*, defaults to `True`):
            Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output as-is (`False`)
        nffn_hidden_size (`int`, *optional*, defaults to 256):
            If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this is the hidden size of the NFFN
        normalize_before_ffn (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN
        nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the NFFN component.
        max_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length to use for positional representations. For `"simple"` relative positional bias, this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to longer sequences
        add_token_type_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to account for token types in embeddings. Left as optional to maintain compatibility with original implementation while adding support for token types.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if `add_token_type_embeddings = True`
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):
            The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in MegaMultiDimensionDampedEma.
        ema_beta_range (`float`, *optional*, defaults to 0.02):
            The standard deviation for initializing the beta parameter (expansion matrix) in MegaMultiDimensionDampedEma.
        ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):
            The standard deviation for initializing the gamma (projection matrix) and omega (residual weight) parameters in MultiDimensionEMA.
        relative_positional_bias (`str`, *optional*, defaults to `"rotary"`):
            Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected, `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):
            Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass hidden states directly to LM head (`False`). Remains optional for compatibility with original implementation

    Examples:

    ```python
    >>> from transformers import MegaConfig, MegaModel

    >>> # Initializing a Mega configuration
    >>> configuration = MegaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MegaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'mega'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=128,
        num_hidden_layers=4,
        intermediate_size=256,
        ema_projection_size=16,
        bidirectional=True,
        shared_representation_size=64,
        use_chunking=False,
        chunk_size=-1,
        truncation=None,
        normalize_before_mega=True,
        normalization_type='scalenorm',
        norm_affine=True,
        activation='silu',
        attention_activation='softmax',
        dropout_prob=0.1,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        use_feature_dropout=False,
        use_normalized_ffn=True,
        nffn_hidden_size=256,
        normalize_before_ffn=True,
        nffn_activation_dropout_prob=0.1,
        max_positions=2048,
        add_token_type_embeddings=False,
        type_vocab_size=2,
        initializer_range=0.02,
        ema_delta_alpha_range=0.2,
        ema_beta_range=0.02,
        ema_gamma_omega_range=1.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        relative_positional_bias='rotary',
        classifier_dropout=None,
        use_cache=True,
        add_lm_hidden_dense_layer=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.activation = activation
        self.attention_activation = attention_activation
        self.intermediate_size = intermediate_size
        self.ema_projection_size = ema_projection_size
        self.bidirectional = bidirectional
        self.shared_representation_size = shared_representation_size
        self.use_chunking = use_chunking
        self.chunk_size = chunk_size
        self.truncation = truncation
        self.normalize_before_mega = normalize_before_mega
        self.normalization_type = normalization_type
        self.norm_affine = norm_affine
        self.dropout_prob = dropout_prob
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.use_feature_dropout = use_feature_dropout
        self.use_normalized_ffn = use_normalized_ffn
        self.nffn_hidden_size = nffn_hidden_size
        self.normalize_before_ffn = normalize_before_ffn
        self.nffn_activation_dropout_prob = nffn_activation_dropout_prob
        self.max_positions = max_positions
        self.add_token_type_embeddings = add_token_type_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.ema_delta_alpha_range = ema_delta_alpha_range
        self.ema_beta_range = ema_beta_range
        self.ema_gamma_omega_range = ema_gamma_omega_range
        self.relative_positional_bias = relative_positional_bias
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer
        self.num_attention_heads = 1
````

**class_skeleton** (repeats the full class docstring, then stubs `__init__`):

````python
class MegaConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Mega
    [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        (... identical to the Args section of human_written_code above ...)

    Examples:

    ```python
    >>> from transformers import MegaConfig, MegaModel

    >>> # Initializing a Mega configuration
    >>> configuration = MegaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MegaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=128,
        num_hidden_layers=4,
        intermediate_size=256,
        ema_projection_size=16,
        bidirectional=True,
        shared_representation_size=64,
        use_chunking=False,
        chunk_size=-1,
        truncation=None,
        normalize_before_mega=True,
        normalization_type='scalenorm',
        norm_affine=True,
        activation='silu',
        attention_activation='softmax',
        dropout_prob=0.1,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        use_feature_dropout=False,
        use_normalized_ffn=True,
        nffn_hidden_size=256,
        normalize_before_ffn=True,
        nffn_activation_dropout_prob=0.1,
        max_positions=2048,
        add_token_type_embeddings=False,
        type_vocab_size=2,
        initializer_range=0.02,
        ema_delta_alpha_range=0.2,
        ema_beta_range=0.02,
        ema_gamma_omega_range=1.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        relative_positional_bias='rotary',
        classifier_dropout=None,
        use_cache=True,
        add_lm_hidden_dense_layer=True,
        **kwargs,
    ):
        pass
````

**Metrics:** total_program_units=2, total_doc_str=1, AvgCountLine=78, AvgCountLineBlank=1, AvgCountLineCode=77, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=1.38, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=35, CountDeclMethod=1, CountDeclMethodAll=33, CountLine=198, CountLineBlank=11, CountLineCode=79, CountLineCodeDecl=78, CountLineCodeExe=37, CountLineComment=109, CountStmt=39, CountStmtDecl=38, CountStmtExe=37, MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=1
---

**id:** 1,806
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/configuration_mega.py`
**class_name:** `transformers.models.deprecated.mega.configuration_mega.MegaOnnxConfig`

**human_written_code:**

```python
from collections import OrderedDict
from ....onnx import OnnxConfig
from collections.abc import Mapping


class MegaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
```

**class_skeleton:**

```python
class MegaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=0, AvgCountLineCode=11, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=40, CountLine=13, CountLineBlank=0, CountLineCode=13, CountLineCodeDecl=4, CountLineCodeExe=10, CountLineComment=0, CountStmt=6, CountStmtDecl=3, CountStmtExe=4, MaxCyclomatic=2, MaxInheritanceTree=5, MaxNesting=1, SumCyclomatic=2
---

**id:** 1,807
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py`
**class_name:** `transformers.models.deprecated.mega.convert_mega_original_pytorch_checkpoint_to_pytorch.MegaLM`

**human_written_code:**

```python
from torch import nn
import torch


class MegaLM(nn.Module):
    """The base class for our Mega encoder - given input IDs, embed text and return encoder output"""

    def __init__(self, mega_args, depth, vocab_size):
        super().__init__()
        self.mega_args = mega_args
        self.embedding_layer = nn.Embedding(vocab_size, self.mega_args.encoder_embed_dim)
        self.encoders = nn.ModuleList([MegaEncoderLayer(self.mega_args) for _ in range(depth)])
        self.depth = depth

    def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
        """
        Code for a forward pass - expects input_ids and attention_mask to come from a Hugging Face tokenizer as
        PyTorch tensors, and returns a tensor of size (batch, n_classes) containing classification logits

        Other options:
          - batch_first: boolean indicating whether the batch dimension is first in input_ids (default: True, which
            aligns with the HF tokenizer behavior)
          - ignore_mask_value: the value in attention_mask that identifies tokens that should be ignored (default: 0,
            which aligns with HF tokenizer)
        """
        if batch_first:
            input_ids = input_ids.T
        if ignore_mask_value == 0:
            attention_mask = 1 - attention_mask
        embeds = self.embedding_layer(input_ids)
        for encoder in self.encoders:
            embeds = encoder(embeds, attention_mask)
        if batch_first:
            return torch.transpose(embeds, 0, 1)
        else:
            return embeds
```

**class_skeleton:**

```python
class MegaLM(nn.Module):
    '''The base class for our Mega encoder - given input IDs, embed text and return encoder output'''

    def __init__(self, mega_args, depth, vocab_size):
        pass

    def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
        '''
        Code for a forward pass - expects input_ids and attention_mask to come from a Hugging Face tokenizer as
        PyTorch tensors, and returns a tensor of size (batch, n_classes) containing classification logits

        Other options:
          - batch_first: boolean indicating whether the batch dimension is first in input_ids (default: True, which
            aligns with the HF tokenizer behavior)
          - ignore_mask_value: the value in attention_mask that identifies tokens that should be ignored (default: 0,
            which aligns with HF tokenizer)
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=2, AvgCountLine=22, AvgCountLineBlank=3, AvgCountLineCode=9, AvgCountLineComment=10, AvgCyclomatic=3, CommentToCodeRatio=1.05, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=47, CountLineBlank=8, CountLineCode=19, CountLineCodeDecl=9, CountLineCodeExe=16, CountLineComment=20, CountStmt=18, CountStmtDecl=9, CountStmtExe=15, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
---

**id:** 1,808
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py`
**class_name:** `transformers.models.deprecated.mega.convert_mega_original_pytorch_checkpoint_to_pytorch.OriginalMegaForMaskedLM`

**human_written_code:**

```python
from torch import nn


class OriginalMegaForMaskedLM(nn.Module):
    """A wrapper class for doing masked language modeling with Mega"""

    def __init__(self, mega_args, depth, vocab_size):
        super().__init__()
        self.mega = MegaLM(mega_args, depth, vocab_size)
        self.mlm_head = nn.Linear(mega_args.encoder_embed_dim, vocab_size)
        self.dropout = nn.Dropout(p=0.1)

    def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
        """
        Perform a forward pass through the Mega encoder and the masked LM head. Returns logits for each vocabulary
        entry.

        If `batch_first` (default to align with Hugging Face tokenizer behavior), output will have the shape (Batch
        size, Sequence length, Vocab size); otherwise (S, B, V)
        """
        encoder_output = self.mega(input_ids, attention_mask, batch_first, ignore_mask_value)
        return self.mlm_head(self.dropout(encoder_output))
```

**class_skeleton:**

```python
class OriginalMegaForMaskedLM(nn.Module):
    '''A wrapper class for doing masked language modeling with Mega'''

    def __init__(self, mega_args, depth, vocab_size):
        pass

    def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
        '''
        Perform a forward pass through the Mega encoder and the masked LM head. Returns logits for each vocabulary
        entry.

        If `batch_first` (default to align with Hugging Face tokenizer behavior), output will have the shape (Batch
        size, Sequence length, Vocab size); otherwise (S, B, V)
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=2, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=3, AvgCyclomatic=1, CommentToCodeRatio=0.78, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=19, CountLineBlank=3, CountLineCode=9, CountLineCodeDecl=7, CountLineCodeExe=6, CountLineComment=7, CountStmt=9, CountStmtDecl=7, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
---

**id:** 1,809
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py`
**class_name:** `transformers.models.deprecated.mega.modeling_mega.MegaBlock`

**human_written_code:**

```python
from torch import nn
import torch.nn.functional as F
from ....cache_utils import Cache
from .configuration_mega import MegaConfig
import torch
from ....utils.deprecation import deprecate_kwarg
from typing import Optional, Union


class MegaBlock(nn.Module):
    def __init__(self, config: MegaConfig):
        super().__init__()
        self.seq_len_dim = 1
        self.mega_layer = MegaMovingAverageGatedAttention(config)
        self.nffn = MegaNormalizedFeedForwardNetwork(config) if config.use_normalized_ffn else None
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
            self.cross_attn = MegaGatedCrossAttention(config)
        else:
            self.cross_attn = None

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        causal_mask: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: bool = False,
    ) -> tuple[torch.Tensor]:
        """
        A single Mega layer: either encoder or decoder, with optional cross-attention and optional normalized
        feed-forward layer

        Args:
            hidden_states (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`):
                Hidden states to be updated by the Mega block
            attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
                Indicates which entries in the self/target sequence are to be ignored (mostly due to padding), where
                elements are either 1 for *not masked* or 0 for *masked*. Causal attention is enforced internally.
            causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for
                *not masked* or 0 for *masked*
            encoder_hidden_states (`torch.Tensor`, of shape `(source_sequence_length, batch_size, hidden_size)`, *optional*):
                Encoder hidden states to be used for cross-attention (and required for encoder-decoder model setup)
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*):
                Indicates which entries in the cross/source sequence are to be ignored (mostly due to padding), where
                elements are either 1 for *not masked* or 0 for *masked*.
            past_key_values (`tuple(torch.Tensor)`, *optional*):
                The hidden states returned from the previous timestep during incremental decoding; expects that
                self-attention key, value, and EMA states are the first 3 entries in the tuple, and (if doing
                cross-attention) cross-attention key and value are the last 2 entries in the tuple
            output_attentions (`bool`, default `False`):
                Whether to return self-attention weights
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `past_key_values` as prior state, and returns the
                updated states for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) --
              Hidden states from target sequence updated by Mega
            - **self_attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape
              `(batch_size, 1, target_sequence_length, target_sequence_length)` -- The self-attention weights
              corresponding to how each token in the input sequence attends to every other token
            - **cross_attn_weights** (*optional*, returned when `output_attentions=True` and
              `config.add_cross_attention=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length,
              target_sequence_length)` -- Pairwise cross-attention weights between every entry in the source sequence
              and target sequence
            - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the
              next step of incremental decoding
            - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of
              incremental decoding
            - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` The incremental EMA state for use in the next step of incremental decoding.
            - **cross_key** (*optional*, returned when `use_cache=True` and `config.is_decoder=True`)
              `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.shared_representation_size)`
              -- The cross-attention key state for use in the next step of incremental decoding
            - **cross_value** (*optional*, returned when `use_cache=True` and `config.is_decoder=True`)
              `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.hidden_size)` -- The
              cross-attention value state for use in the next step of incremental decoding
        """
        if use_cache and past_key_values is not None and (attention_mask is not None):
            mega_padding_mask = attention_mask[:, -1].unsqueeze(-1)
        else:
            mega_padding_mask = attention_mask
        mega_outputs = self.mega_layer(
            input=hidden_states,
            padding_mask=mega_padding_mask,
            causal_mask=causal_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        new_hidden_states = mega_outputs[0]
        self_key, self_value, self_ema_state = mega_outputs[-3:] if use_cache else (None, None, None)
        self_attention_weights = mega_outputs[1] if output_attentions else None
        if self.cross_attn is not None:
            if encoder_hidden_states is None:
                raise ValueError('Requested cross-attention without providing encoder hidden states')
            cross_attn_outputs = self.cross_attn(
                query=new_hidden_states,
                key=encoder_hidden_states,
                value=encoder_hidden_states,
                key_padding_mask=encoder_attention_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )
            new_hidden_states = cross_attn_outputs[0]
            cross_key, cross_value = cross_attn_outputs[-2:] if use_cache else (None, None)
            cross_attention_weights = cross_attn_outputs[1] if output_attentions else None
        if self.nffn is not None:
            new_hidden_states = self.nffn(new_hidden_states)
        outs = (new_hidden_states,)
        if output_attentions:
            outs = outs + (self_attention_weights,)
            if self.cross_attn is not None:
                outs = outs + (cross_attention_weights,)
        if use_cache:
            new_key_values = (self_key, self_value, self_ema_state)
            if self.cross_attn is not None:
                new_key_values = new_key_values + (cross_key, cross_value)
            outs = outs + (new_key_values,)
        return outs
```

**class_skeleton** (repeats the full `forward` docstring, then stubs both methods):

```python
class MegaBlock(nn.Module):
    def __init__(self, config: MegaConfig):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        causal_mask: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: bool = False,
    ) -> tuple[torch.Tensor]:
        '''
        A single Mega layer: either encoder or decoder, with optional cross-attention and optional normalized
        feed-forward layer

        (... Args and Returns sections identical to the docstring in human_written_code above ...)
        '''
        pass
```

**Metrics:** total_program_units=4, total_doc_str=1, AvgCountLine=73, AvgCountLineBlank=7, AvgCountLineCode=36, AvgCountLineComment=31, AvgCyclomatic=9, CommentToCodeRatio=0.85, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=147, CountLineBlank=14, CountLineCode=72, CountLineCodeDecl=29, CountLineCodeExe=59, CountLineComment=61, CountStmt=41, CountStmtDecl=19, CountStmtExe=38, MaxCyclomatic=13, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=17
---

**id:** 1,810
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py`
**class_name:** `transformers.models.deprecated.mega.modeling_mega.MegaClassificationHead`

**human_written_code:**

```python
import torch
import torch.nn.functional as F
from torch import nn


class MegaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
```

**class_skeleton:**

```python
class MegaClassificationHead(nn.Module):
    '''Head for sentence-level classification tasks.'''

    def __init__(self, config):
        pass

    def forward(self, features, **kwargs):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.12, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=20, CountLineBlank=2, CountLineCode=17, CountLineCodeDecl=8, CountLineCodeExe=14, CountLineComment=2, CountStmt=15, CountStmtDecl=8, CountStmtExe=12, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
---

**id:** 1,811
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py`
**class_name:** `transformers.models.deprecated.mega.modeling_mega.MegaDropout`

**human_written_code:**

```python
import torch.nn.functional as F
from torch import nn


class MegaDropout(nn.Module):
    """
    A unified class for standard dropout functionality and featurewise dropout.

    The original fairseq Mega repo used 2 classes for these, which included some unnecessary handling of training
    logic and an unused `inplace` option. The original implementation used torch.nn.functional instead of submodules,
    which is retained here as well.
    """

    def __init__(self, dropout_probability, is_featurewise=False):
        super().__init__()
        self.dropout_probability = dropout_probability
        self.is_featurewise = is_featurewise

    def forward(self, input, batch_first: bool = False):
        if self.is_featurewise:
            if batch_first:
                return F.dropout2d(
                    input.transpose(-1, -2), p=self.dropout_probability, training=self.training
                ).transpose(-1, -2)
            else:
                if input.dim() != 3:
                    raise ValueError(
                        'Feature dropout inputs must be exactly 3-dimensional if inputs are ordered '
                        '[sequence length, batch size, hidden dimension]'
                    )
                return F.dropout2d(
                    input.permute(1, 2, 0), p=self.dropout_probability, training=self.training
                ).permute(2, 0, 1)
        else:
            return F.dropout(input, p=self.dropout_probability, training=self.training)
```

**class_skeleton:**

```python
class MegaDropout(nn.Module):
    '''
    A unified class for standard dropout functionality and featurewise dropout.

    The original fairseq Mega repo used 2 classes for these, which included some unnecessary handling of training
    logic and an unused `inplace` option. The original implementation used torch.nn.functional instead of submodules,
    which is retained here as well.
    '''

    def __init__(self, dropout_probability, is_featurewise=False):
        pass

    def forward(self, input, batch_first: bool = False):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=13, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.57, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=36, CountLineBlank=3, CountLineCode=21, CountLineCodeDecl=5, CountLineCodeExe=18, CountLineComment=12, CountStmt=13, CountStmtDecl=5, CountStmtExe=10, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=5
---

**id:** 1,812
**repository_name:** huggingface/pytorch-pretrained-BERT
**file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py`
**class_name:** `transformers.models.deprecated.mega.modeling_mega.MegaEmbeddings`

**human_written_code:**

```python
import torch
import torch.nn.functional as F
from torch import nn
from .configuration_mega import MegaConfig


class MegaEmbeddings(nn.Module):
    """
    Mega's basic implementation does not incorporate token type embeddings, so this is a stripped-down version of
    RoBERTa's embeddings which optionally includes token types
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.use_token_types = config.add_token_type_embeddings
        if self.use_token_types:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
            self.register_buffer(
                'token_type_ids',
                torch.zeros(config.max_positions, dtype=torch.long).expand((1, -1)),
                persistent=False,
            )
        self.padding_idx = config.pad_token_id

    def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
        if input_ids is None and inputs_embeds is None:
            raise ValueError('Must provide one of input_ids or inputs_embeds')
        elif input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
            inputs_embeds = self.word_embeddings(input_ids)
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        if self.use_token_types:
            if token_type_ids is None:
                if hasattr(self, 'token_type_ids'):
                    buffered_token_type_ids = self.token_type_ids[:, :input_shape[1]]
                    buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], input_shape[1])
                    token_type_ids = buffered_token_type_ids_expanded
                else:
                    token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings = inputs_embeds + token_type_embeddings
        else:
            embeddings = inputs_embeds
        return embeddings
```

**class_skeleton:**

```python
class MegaEmbeddings(nn.Module):
    '''
    Mega's basic implementation does not incorporate token type embeddings, so this is a stripped-down version of
    RoBERTa's embeddings which optionally includes token types
    '''

    def __init__(self, config: MegaConfig):
        pass

    def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=23, AvgCountLineBlank=2, AvgCountLineCode=17, AvgCountLineComment=4, AvgCyclomatic=4, CommentToCodeRatio=0.35, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=52, CountLineBlank=6, CountLineCode=34, CountLineCodeDecl=13, CountLineCodeExe=31, CountLineComment=12, CountStmt=28, CountStmtDecl=13, CountStmtExe=25, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=8
1,813
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaForCausalLM
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
from ....cache_utils import Cache
from .configuration_mega import MegaConfig
import torch
from typing import Optional, Union

@add_start_docstrings('MEGA Model with a `language modeling` head on top for CLM fine-tuning.', MEGA_START_DOCSTRING)
class MegaForCausalLM(MegaPreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config: MegaConfig):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning('If you want to use `MegaForCausalLM` as a standalone, add `is_decoder=True.`')
        self.mega = MegaModel(config, add_pooling_layer=False)
        if config.add_lm_hidden_dense_layer:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)
            self.hidden_activation = nn.Tanh()
        else:
            self.dense = None
            self.hidden_activation = None
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        """
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MegaForCausalLM, AutoConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
        >>> config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext")
        >>> config.is_decoder = True
        >>> config.bidirectional = False
        >>> model = MegaForCausalLM.from_pretrained(
        ...     "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True
        ... )

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False
        outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        if self.dense is not None:
            sequence_output = self.dense(sequence_output)
            sequence_output = self.hidden_activation(sequence_output)
        prediction_scores = self.lm_head(sequence_output)
        lm_loss = None
        if labels is not None:
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (lm_loss,) + output if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]
        return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values}

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),)
        return reordered_past
@add_start_docstrings('MEGA Model with a `language modeling` head on top for CLM fine-tuning.', MEGA_START_DOCSTRING)
class MegaForCausalLM(MegaPreTrainedModel):

    def __init__(self, config: MegaConfig):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        '''
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MegaForCausalLM, AutoConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
        >>> config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext")
        >>> config.is_decoder = True
        >>> config.bidirectional = False
        >>> model = MegaForCausalLM.from_pretrained(
        ...     "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True
        ... )

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```'''
        pass

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        pass

    def _reorder_cache(self, past_key_values, beam_idx):
        pass
8
1
25
4
14
7
3
0.47
1
7
3
0
6
4
6
136
158
29
88
37
65
41
50
22
43
7
3
1
17
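Editorial note on the record above: a minimal generation sketch, not part of the dataset. It assumes a transformers release that still ships the deprecated Mega classes and uses the `mnaylor/mega-base-wikitext` checkpoint already referenced in the class's own docstring; `generate()` exercises the `prepare_inputs_for_generation` path shown in the code.

```python
from transformers import AutoConfig, AutoTokenizer, MegaForCausalLM

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext")
config.is_decoder = True       # apply the causal mask
config.bidirectional = False   # Mega's EMA must run left-to-right for CLM
model = MegaForCausalLM.from_pretrained(
    "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True
)

inputs = tokenizer("Hello, my dog is", return_tensors="pt")
# each decoding step feeds only the newest token once past_key_values exist,
# mirroring prepare_inputs_for_generation's input_ids[:, -1:] slice
generated = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(generated[0]))
```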
1,814
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaForMaskedLM
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
from .configuration_mega import MegaConfig
import torch
from typing import Optional, Union

@add_start_docstrings('MEGA Model with a `language modeling` head on top.', MEGA_START_DOCSTRING)
class MegaForMaskedLM(MegaPreTrainedModel):
    _tied_weights_keys = ['mlm_head.weight']

    def __init__(self, config: MegaConfig):
        super().__init__(config)
        if config.is_decoder:
            logger.warning('If you want to use `MegaForMaskedLM`, set `config.is_decoder=False` for bi-directional self-attention.')
        self.mega = MegaModel(config, add_pooling_layer=False)
        if config.add_lm_hidden_dense_layer:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)
            self.hidden_activation = nn.Tanh()
        else:
            self.dense = None
            self.hidden_activation = None
        self.mlm_head = nn.Linear(config.hidden_size, config.vocab_size)
        self.dropout = nn.Dropout(config.dropout_prob)
        self.post_init()

    def get_output_embeddings(self):
        return self.mlm_head

    def set_output_embeddings(self, new_embeddings):
        self.mlm_head = new_embeddings

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='<mask>', expected_output="' Paris'", expected_loss=0.1)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        kwargs (`dict[str, any]`, optional, defaults to *{}*):
            Used to hide legacy arguments that have been deprecated.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        if self.dense is not None:
            sequence_output = self.dense(sequence_output)
            sequence_output = self.hidden_activation(sequence_output)
        prediction_scores = self.mlm_head(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (masked_lm_loss,) + output if masked_lm_loss is not None else output
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('MEGA Model with a `language modeling` head on top.', MEGA_START_DOCSTRING)
class MegaForMaskedLM(MegaPreTrainedModel):

    def __init__(self, config: MegaConfig):
        pass

    def get_output_embeddings(self):
        pass

    def set_output_embeddings(self, new_embeddings):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='<mask>', expected_output="' Paris'", expected_loss=0.1)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        kwargs (`dict[str, any]`, optional, defaults to *{}*):
            Used to hide legacy arguments that have been deprecated.
        '''
        pass
8
1
20
2
16
2
3
0.12
1
6
3
0
4
5
4
134
95
11
75
30
49
9
35
17
30
6
3
1
11
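Editorial note on the record above: a minimal fill-mask sketch, not part of the dataset. It assumes a transformers release that still ships the deprecated Mega classes; the `<mask>` token and the `mnaylor/mega-base-wikitext` checkpoint come from the `@add_code_sample_docstrings` metadata in the code itself (expected completion: ' Paris').

```python
import torch
from transformers import AutoTokenizer, MegaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
model = MegaForMaskedLM.from_pretrained("mnaylor/mega-base-wikitext")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# locate the <mask> position, then take its highest-scoring vocabulary entry
mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(-1)
print(tokenizer.decode(predicted_id))
```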
1,815
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaForMultipleChoice
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
import torch
from typing import Optional, Union

@add_start_docstrings('\n    MEGA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', MEGA_START_DOCSTRING)
class MegaForMultipleChoice(MegaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.mega = MegaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.mega(flat_input_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n    MEGA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', MEGA_START_DOCSTRING)
class MegaForMultipleChoice(MegaPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
        '''
        pass
6
1
34
4
27
4
6
0.12
1
5
2
0
2
3
2
132
76
9
60
29
41
7
27
18
24
10
3
1
11
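Editorial note on the record above: a minimal multiple-choice sketch, not part of the dataset. It assumes the deprecated Mega classes are available and that the tokenizer for `mnaylor/mega-base-wikitext` supports sentence pairs and padding; the prompt and choices are made-up illustration data. The key point is the `(batch_size, num_choices, sequence_length)` input shape that `forward()` flattens internally.

```python
import torch
from transformers import AutoTokenizer, MegaForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
model = MegaForMultipleChoice.from_pretrained("mnaylor/mega-base-wikitext")

prompt = "The dog chased the ball because"
choices = ["it was thrown.", "the moon is round."]
# encode each (prompt, choice) pair, then add the num_choices dimension
encoding = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}

outputs = model(**inputs, labels=torch.tensor([0]))
print(outputs.loss, outputs.logits)  # logits shape: (1, num_choices)
```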
1,816
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaForQuestionAnswering
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
import torch
from typing import Optional, Union

@add_start_docstrings('\n    MEGA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', MEGA_START_DOCSTRING)
class MegaForQuestionAnswering(MegaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mega = MegaModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        """
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n    MEGA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', MEGA_START_DOCSTRING)
class MegaForQuestionAnswering(MegaPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        '''
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss.
        '''
        pass
6
1
39
5
28
7
4
0.21
1
5
2
0
2
3
2
132
86
10
63
28
43
13
32
16
29
7
3
2
8
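Editorial note on the record above: a minimal extractive-QA sketch, not part of the dataset. It assumes the deprecated Mega classes are importable; the question/context strings are made-up illustration data, and the span is decoded by taking the argmax of the start and end logits that `forward()` returns.

```python
import torch
from transformers import AutoTokenizer, MegaForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
model = MegaForQuestionAnswering.from_pretrained("mnaylor/mega-base-wikitext")

question = "Who proposed Mega?"
context = "Mega was proposed by Xuezhe Ma and colleagues."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# most likely start/end token indices, decoded back to text
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```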
1,817
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaForSequenceClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
import torch
from typing import Optional, Union

@add_start_docstrings('\n    MEGA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n    output) e.g. for GLUE tasks.\n    ', MEGA_START_DOCSTRING)
class MegaForSequenceClassification(MegaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.mega = MegaModel(config, add_pooling_layer=False)
        self.classifier = MegaClassificationHead(config)
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n    MEGA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n    output) e.g. for GLUE tasks.\n    ', MEGA_START_DOCSTRING)
class MegaForSequenceClassification(MegaPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        '''
        pass
6
1
37
4
30
4
7
0.1
1
7
3
0
2
4
2
132
82
8
67
24
48
7
33
13
30
12
3
3
13
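Editorial note on the record above: a minimal classification sketch, not part of the dataset. It assumes the deprecated Mega classes are importable; passing `num_labels=2` creates a freshly initialized head, so the outputs are untrained and purely illustrative. Integer labels with `num_labels > 1` make `forward()` select `problem_type == 'single_label_classification'` (CrossEntropyLoss), as in the branching shown in the code.

```python
import torch
from transformers import AutoTokenizer, MegaForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
model = MegaForSequenceClassification.from_pretrained(
    "mnaylor/mega-base-wikitext", num_labels=2
)

inputs = tokenizer("This film was great.", return_tensors="pt")
# long-dtype labels trigger the single-label classification branch
outputs = model(**inputs, labels=torch.tensor([1]))
print(outputs.loss, outputs.logits.softmax(-1))
```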
1,818
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaForTokenClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
import torch
from typing import Optional, Union

@add_start_docstrings('\n    MEGA Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', MEGA_START_DOCSTRING)
class MegaForTokenClassification(MegaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mega = MegaModel(config, add_pooling_layer=False)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n    MEGA Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', MEGA_START_DOCSTRING)
class MegaForTokenClassification(MegaPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        '''
        pass
6
1
30
4
24
3
4
0.09
1
5
2
0
2
4
2
132
68
9
54
25
35
5
23
14
20
5
3
1
7
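Editorial note on the record above: a minimal token-classification sketch, not part of the dataset. It assumes the deprecated Mega classes are importable; `num_labels=5` creates a freshly initialized head, so predictions are untrained and purely illustrative.

```python
import torch
from transformers import AutoTokenizer, MegaForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
model = MegaForTokenClassification.from_pretrained(
    "mnaylor/mega-base-wikitext", num_labels=5
)

inputs = tokenizer("Sarah lives in Berlin", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
print(logits.argmax(-1))  # one predicted label id per token
```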
1,819
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaGatedCrossAttention
from torch import nn
import torch.nn.functional as F
from ....cache_utils import Cache
from .configuration_mega import MegaConfig
import torch
from ....activations import ACT2FN
from typing import Optional, Union

class MegaGatedCrossAttention(nn.Module):
    """
    Gated Structured State Attention for use in encoder-decoder model. See Mega paper for more details. Only
    modifications from original implementation are variable names, removing the unnecessary `before_attn_fn` and
    `static_kv` arguments, and the stateful representation of incremental decoder state.
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.activation = ACT2FN[self.config.activation]
        self.attention_activation = self.config.attention_activation
        self.scaling = self.config.shared_representation_size ** (-0.5) if self.attention_activation == 'softmax' else None
        self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.hidden_dropout = MegaDropout(self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False)
        self.prenorm = self.config.normalize_before_mega
        self.norm = MegaSequenceNorm(self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine)
        self.k_proj = nn.Linear(self.config.hidden_size, self.config.shared_representation_size)
        self.v_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size)
        self.q_proj = nn.Linear(self.config.hidden_size, 2 * self.config.hidden_size + self.config.shared_representation_size)
        self.h_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size)
        if self.config.relative_positional_bias == 'simple':
            self.rel_pos_bias = MegaSimpleRelativePositionalBias(config)
        elif self.config.relative_positional_bias == 'rotary':
            self.rel_pos_bias = MegaRotaryRelativePositionalBias(config)
        else:
            raise ValueError(f'unknown relative position bias: {self.config.relative_positional_bias}')
        self.softmax = nn.Softmax(dim=-1)

    def element_attention(self, query, key, key_padding_mask, pidx):
        bsz, src_len, _ = key.size()
        tgt_len = query.size(1) if pidx is None else pidx + 1
        if key_padding_mask is not None:
            lengths = key_padding_mask.sum(dim=-1).view(bsz, 1, 1)
        else:
            lengths = src_len
        bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len]
        if pidx is not None:
            if query.size(1) != 1:
                raise ValueError('Position offset provided with queries longer than 1 token')
            bias = bias[pidx]
        else:
            bias = bias[:tgt_len]
        qk = torch.bmm(query, key.transpose(1, 2)) / lengths + bias
        attn_weights = ACT2FN[self.attention_activation](qk).type_as(qk)
        if key_padding_mask is not None:
            attn_weights = attn_weights * key_padding_mask.unsqueeze(1)
        return attn_weights

    def softmax_attention(self, query, key, key_padding_mask, pidx):
        bsz, src_len, _ = key.size()
        tgt_len = query.size(1) if pidx is None else pidx + 1
        bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len]
        if pidx is not None:
            if query.size(1) != 1:
                raise ValueError('Position offset provided with queries longer than 1 token')
            bias = bias[pidx]
        else:
            bias = bias[:tgt_len]
        query = query * self.scaling
        qk = torch.bmm(query, key.transpose(1, 2)) + bias
        if key_padding_mask is not None:
            qk = qk.masked_fill((1 - key_padding_mask).unsqueeze(1).to(torch.bool), float('-inf'))
        attn_weights = self.softmax(qk).type_as(qk)
        return attn_weights

    def forward(self, query, key: Optional[torch.Tensor], value: Optional[torch.Tensor], key_padding_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Gated cross-attention used in Mega

        Args:
            query (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`):
                The self (or target) sequence input used as query inputs for cross-attention
            key (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`):
                The cross (or source) sequence input with shape used as keys in cross-attention
            value (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`):
                The cross (or source) sequence input with shape used as values in cross-attention
            key_padding_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*):
                Padding mask corresponding to the source sequence, where entries are 1 for *not masked* and 0 for *masked* tokens
            past_key_values (`tuple(torch.FloatTensor)`, *optional*):
                If provided, the hidden state returned from the previous timestep during incremental decoding; expects that prior cross-attention keys and values will be the last two items in the tuple
            output_attentions (`bool`, defaults to `False`):
                Whether or not to return the cross-attention weights.
            use_cache (`bool`, defaults to `False`):
                Whether to perform incremental decoding; uses `prev_state` as the prior timestep, and returns the updated EMA hidden state for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) -- Hidden states from target sequence updated by gated cross-attention
            - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, target_sequence_length)` -- The pairwise cross-attention weights corresponding to each token in the source and target sequences
            - **cross_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.shared_representation_size)` -- The cross-attention key state for use in the next step of incremental decoding
            - **cross_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.hidden_size)` -- The cross-attention value state for use in the next step of incremental decoding
        """
        seq_len, bsz, embed_dim = query.size()
        if embed_dim != self.config.hidden_size:
            raise ValueError(f'Unexpected embedding dimension received: input is {embed_dim} but expected {self.config.hidden_size}')
        if past_key_values is not None:
            if seq_len != 1:
                raise ValueError(f'Incremental decoding requested with self-sequence length > 1: {seq_len}')
            prev_cross_key, prev_cross_value = past_key_values[-2:]
            key = value = None
            prev_self_key = past_key_values[0]
            num_incremental_steps = prev_self_key.size(1) + 1
        else:
            prev_cross_key = prev_cross_value = None
            num_incremental_steps = 0 if use_cache and seq_len == 1 else None
        full_query = query
        if self.prenorm:
            full_query = self.norm(full_query)
        query_projected = self.q_proj(full_query)
        residual_weight, target_gate, attention_query = torch.split(query_projected, [self.config.hidden_size, self.config.hidden_size, self.config.shared_representation_size], dim=-1)
        residual_weight = torch.sigmoid(residual_weight)
        target_gate = F.silu(target_gate)
        if key is None:
            if value is not None:
                raise ValueError('Key and value must be `None` simultaneously')
            projected_key = projected_value = None
        else:
            projected_key = self.k_proj(key)
            projected_value = self.activation(self.v_proj(key))
        attention_query = attention_query.transpose(0, 1)
        if projected_key is not None:
            projected_key = projected_key.transpose(0, 1)
        if projected_value is not None:
            projected_value = projected_value.transpose(0, 1)
        if past_key_values is not None:
            projected_key = prev_cross_key
            projected_value = prev_cross_value
        if use_cache:
            updated_cross_key = projected_key
            updated_cross_value = projected_value
        ctx_len = projected_key.size(1)
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            if key_padding_mask.size(0) != bsz:
                raise ValueError('Key padding mask does not align on the batch dimension')
            if key_padding_mask.size(1) != ctx_len:
                raise ValueError('Key padding mask does not align on the sequence length dimension')
        if self.attention_activation == 'softmax':
            attn_weights = self.softmax_attention(attention_query, projected_key, key_padding_mask, num_incremental_steps)
        else:
            attn_weights = self.element_attention(attention_query, projected_key, key_padding_mask, num_incremental_steps)
        projected_value = self.hidden_dropout(projected_value, batch_first=True)
        kernel = self.attention_dropout(attn_weights)
        weighted_targets = torch.bmm(kernel, projected_value).transpose(0, 1)
        weighted_targets = self.activation(self.h_proj(weighted_targets * target_gate))
        weighted_targets = self.dropout(weighted_targets)
        out = torch.addcmul(query, residual_weight, weighted_targets - query)
        if not self.prenorm:
            out = self.norm(out)
        outputs = (out, attn_weights) if output_attentions else (out,)
        if use_cache:
            outputs = outputs + (updated_cross_key, updated_cross_value)
        return outputs
class MegaGatedCrossAttention(nn.Module):
    '''
    Gated Structured State Attention for use in encoder-decoder model. See Mega paper for more details. Only
    modifications from original implementation are variable names, removing the unnecessary `before_attn_fn` and
    `static_kv` arguments, and the stateful representation of incremental decoder state.
    '''

    def __init__(self, config: MegaConfig):
        pass

    def element_attention(self, query, key, key_padding_mask, pidx):
        pass

    def softmax_attention(self, query, key, key_padding_mask, pidx):
        pass

    def forward(self, query, key: Optional[torch.Tensor], value: Optional[torch.Tensor], key_padding_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        '''
        Gated cross-attention used in Mega

        Args:
            query (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`):
                The self (or target) sequence input used as query inputs for cross-attention
            key (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`):
                The cross (or source) sequence input with shape used as keys in cross-attention
            value (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`):
                The cross (or source) sequence input with shape used as values in cross-attention
            key_padding_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*):
                Padding mask corresponding to the source sequence, where entries are 1 for *not masked* and 0 for *masked* tokens
            past_key_values (`tuple(torch.FloatTensor)`, *optional*):
                If provided, the hidden state returned from the previous timestep during incremental decoding; expects that prior cross-attention keys and values will be the last two items in the tuple
            output_attentions (`bool`, defaults to `False`):
                Whether or not to return the cross-attention weights.
            use_cache (`bool`, defaults to `False`):
                Whether to perform incremental decoding; uses `prev_state` as the prior timestep, and returns the updated EMA hidden state for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) -- Hidden states from target sequence updated by gated cross-attention
            - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, target_sequence_length)` -- The pairwise cross-attention weights corresponding to each token in the source and target sequences
            - **cross_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.shared_representation_size)` -- The cross-attention key state for use in the next step of incremental decoding
            - **cross_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.hidden_size)` -- The cross-attention value state for use in the next step of incremental decoding
        '''
        pass
5
2
62
9
36
17
9
0.49
1
10
5
0
4
15
4
14
256
38
146
56
132
72
113
47
108
20
1
2
35
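Editorial note on the record above: a small self-contained sketch (not part of the dataset) of the gated residual update at the end of `MegaGatedCrossAttention.forward`. The tensor shapes are hypothetical placeholders; the point is that `torch.addcmul(q, r, h - q)` equals `q + r * (h - q)`, a learned per-element interpolation between the residual stream and the attention output.

```python
import torch

# hypothetical shapes for illustration only
tgt_len, bsz, hidden = 4, 2, 8
query = torch.randn(tgt_len, bsz, hidden)             # stands in for the residual stream
weighted_targets = torch.randn(tgt_len, bsz, hidden)  # stands in for the h_proj output
residual_weight = torch.sigmoid(torch.randn(tgt_len, bsz, hidden))  # gate in (0, 1)

# addcmul(input, t1, t2) computes input + t1 * t2 (default value=1)
out = torch.addcmul(query, residual_weight, weighted_targets - query)
assert torch.allclose(out, query + residual_weight * (weighted_targets - query))
```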
1,820
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaModel
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch.nn.functional as F
from ....cache_utils import Cache
from .configuration_mega import MegaConfig
import torch
from typing import Optional, Union

@add_start_docstrings('The bare MEGA Model transformer outputting raw hidden-states without any specific head on top.', MEGA_START_DOCSTRING)
class MegaModel(MegaPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added after self-attention, following the architecture described in *Mega: Moving Average
    Equipped Gated Attention*_ by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig,
    Jonathan May, and Luke Zettlemoyer

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True` and `bidirectional` set to `False`. To be used in a Seq2Seq model, the model needs to initialized with
    both `is_decoder=True` and `bidirectional=False` argument as well as `add_cross_attention` set to `True`; an
    `encoder_hidden_states` is then expected as an input to the forward pass.

    .. _*Mega: Moving Average Equipped Gated Attention*: https://huggingface.co/papers/2209.10655
    """

    def __init__(self, config: MegaConfig, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embedding_layer = MegaEmbeddings(config)
        self.layers = nn.ModuleList([MegaBlock(config) for _ in range(config.num_hidden_layers)])
        self.pooler = MegaPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embedding_layer.word_embeddings

    def set_input_embeddings(self, value):
        self.embedding_layer.word_embeddings = value

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        """
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        if self.config.use_chunking:
            input_shape = torch.tensor([input_shape[0], self.config.chunk_size])
        batch_size, sequence_length = input_shape
        if self.config.use_chunking and sequence_length > self.config.chunk_size:
            if sequence_length % self.config.chunk_size != 0:
                raise ValueError(f'config.use_chunking is activated; input sequence length must be shorter than or a multiple of config.chunk_size\nreceived sequence length of {sequence_length} with chunk size {self.config.chunk_size}')
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
            temp_mask_for_extension = torch.ones((1, sequence_length), dtype=torch.long, device=device)
            causal_mask = self.create_extended_attention_mask_for_decoder(input_shape, temp_mask_for_extension)
            causal_mask = causal_mask.squeeze(0)
        else:
            use_cache = False
            causal_mask = None
        if past_key_values is not None and len(past_key_values) != self.config.num_hidden_layers:
            raise ValueError(f'Received past key/value cache with size mismatch; expected {self.config.num_hidden_layers}, received {len(past_key_values)}')
        embedding_output = self.embedding_layer(input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        hidden_states = embedding_output.transpose(0, 1)
        if encoder_hidden_states is not None:
            encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
        all_hidden_states = (embedding_output,) if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, mega_layer in enumerate(self.layers):
            current_decoder_cache = past_key_values[i] if past_key_values is not None else None
            mega_outputs = mega_layer(hidden_states=hidden_states, attention_mask=attention_mask, causal_mask=causal_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=current_decoder_cache, output_attentions=output_attentions, use_cache=use_cache)
            hidden_states = mega_outputs[0]
            if output_hidden_states:
                all_hidden_states += (hidden_states.transpose(0, 1),)
            if output_attentions:
                self_attn_weights = mega_outputs[1]
                all_self_attentions += (self_attn_weights,)
                if self.config.add_cross_attention:
                    cross_attn_weights = mega_outputs[2]
                    all_cross_attentions += (cross_attn_weights,)
            if use_cache:
                updated_cache = mega_outputs[-1]
                next_decoder_cache += (updated_cache,)
        hidden_states = hidden_states.transpose(0, 1)
        pooled_output = self.pooler(hidden_states) if self.pooler is not None else None
        if not return_dict:
            return (hidden_states, pooled_output) + (all_hidden_states, next_decoder_cache, all_self_attentions, all_cross_attentions)
        return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=hidden_states, pooler_output=pooled_output, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
@add_start_docstrings('The bare MEGA Model transformer outputting raw hidden-states without any specific head on top.', MEGA_START_DOCSTRING)
class MegaModel(MegaPreTrainedModel):
    '''
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added after self-attention, following the architecture described in *Mega: Moving Average
    Equipped Gated Attention*_ by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig,
    Jonathan May, and Luke Zettlemoyer

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True` and `bidirectional` set to `False`. To be used in a Seq2Seq model, the model needs to initialized with
    both `is_decoder=True` and `bidirectional=False` argument as well as `add_cross_attention` set to `True`; an
    `encoder_hidden_states` is then expected as an input to the forward pass.

    .. _*Mega: Moving Average Equipped Gated Attention*: https://huggingface.co/papers/2209.10655
    '''

    def __init__(self, config: MegaConfig, add_pooling_layer=True):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        '''
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
        '''
        pass
8
2
42
6
28
9
8
0.38
1
11
5
0
4
4
4
134
195
31
119
41
95
45
67
27
62
26
3
3
30
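Editorial note on the record above: a minimal feature-extraction sketch, not part of the dataset. It assumes the deprecated Mega classes are importable and uses the `mnaylor/mega-base-wikitext` checkpoint referenced elsewhere in this file; with the default `add_pooling_layer=True`, the output also carries a `pooler_output`.

```python
import torch
from transformers import AutoTokenizer, MegaModel

tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
model = MegaModel.from_pretrained("mnaylor/mega-base-wikitext")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (batch_size, hidden_size)
```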
1,821
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaMovingAverageGatedAttention
from torch import nn
import torch.nn.functional as F
from ....cache_utils import Cache
from .configuration_mega import MegaConfig
import torch
from ....activations import ACT2FN
from typing import Optional, Union


class MegaMovingAverageGatedAttention(nn.Module):
    """
    Pure PyTorch implementation of Mega block; see https://huggingface.co/papers/2209.10655 and original fairseq
    implementation at https://github.com/facebookresearch/mega (copyright Meta Research, licensed under MIT License)

    Differences from original implementation include hidden state refactor and fixed inconsistency with additive /
    multiplicative attention masks
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.activation = ACT2FN[self.config.activation]
        self.scaling = self.config.shared_representation_size ** (-0.5) if self.config.attention_activation == 'softmax' else None
        self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.hidden_dropout = MegaDropout(self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False)
        self.norm = MegaSequenceNorm(self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine)
        self.ema_gate = MegaMultiDimensionDampedEma(config)
        self.v_proj = nn.Linear(self.config.hidden_size, self.config.intermediate_size)
        self.mx_proj = nn.Linear(self.config.hidden_size, self.config.shared_representation_size + self.config.intermediate_size + 2 * self.config.hidden_size)
        self.h_proj = nn.Linear(self.config.intermediate_size, self.config.hidden_size)
        self.qk_weight = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size))
        self.qk_bias = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size))
        if self.config.relative_positional_bias == 'simple':
            self.rel_pos_bias = MegaSimpleRelativePositionalBias(config)
        elif self.config.relative_positional_bias == 'rotary':
            self.rel_pos_bias = MegaRotaryRelativePositionalBias(config)
        else:
            raise ValueError(f'Unknown relative positional bias: {self.config.relative_positional_bias}')
        self.softmax = nn.Softmax(dim=-1)
        self.attention_function = self.softmax_attention if self.config.attention_activation == 'softmax' else self.element_attention

    def element_attention(self, query, key, padding_mask, causal_mask):
        """
        Apply element-wise attention via relu^2 or laplace. Same as original implementation but with standardized
        causal attention mask. Expects the Hugging Face standard attention mask paradigm: 1 for not masked, and 0 for
        masked.
        """
        seq_len = key.size(2)
        if padding_mask is not None:
            lengths = padding_mask.sum(-1, keepdim=True)
            lengths = lengths.clamp(min=1.0).unsqueeze(-1)
        else:
            lengths = seq_len
        if causal_mask is not None:
            lengths = causal_mask.sum(dim=-1, keepdim=True)
        bias = self.rel_pos_bias(seq_len)
        if seq_len != query.size(2):
            if query.size(2) != 1:
                raise ValueError('Size mismatch between Q and K in element attention')
            bias = bias[-1:]
        qk = torch.matmul(query, key.transpose(2, 3)) / lengths + bias
        attn_weights = ACT2FN[self.config.attention_activation](qk).type_as(qk)
        if padding_mask is not None:
            attn_weights = attn_weights * padding_mask.unsqueeze(2)
        if causal_mask is not None:
            attn_weights = attn_weights * causal_mask
        return attn_weights

    def softmax_attention(self, query, key, padding_mask, causal_mask):
        """Standard softmax self-attention, as in the original Transformer paper"""
        seq_len = key.size(2)
        bias = self.rel_pos_bias(seq_len)
        if seq_len != query.size(2):
            if query.size(2) != 1:
                raise ValueError('Size mismatch between Q and K in softmax attention')
            bias = bias[-1:]
        query = query * self.scaling
        qk = torch.matmul(query, key.transpose(2, 3)) + bias
        if causal_mask is not None:
            additive_causal_mask = torch.zeros_like(causal_mask, dtype=qk.dtype)
            additive_causal_mask = additive_causal_mask.masked_fill((1 - causal_mask).bool(), float('-inf'))
            qk = qk + additive_causal_mask
        if padding_mask is not None:
            padding_mask = 1 - padding_mask
            padding_mask_all = padding_mask.all(dim=-1, keepdim=True)
            padding_mask = torch.logical_and(padding_mask, ~padding_mask_all)
            qk = qk.masked_fill(padding_mask.unsqueeze(2).to(torch.bool), float('-inf'))
        attn_weights = self.softmax(qk).type_as(qk)
        return attn_weights

    def forward(self, input, padding_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions=False, use_cache=False):
        """
        Mega's self-attention block, which combines multi-headed EMA with traditional self-attention

        Args:
            input (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):
                Hidden states to be updated by Mega's self-attention
            padding_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
                or 0 for *masked*
            causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for
                *not masked* or 0 for *masked*
            past_key_values (`tuple(torch.Tensor)`, *optional*):
                The hidden states returned from the previous timestep during incremental decoding; expects that
                self-attention key, value, and EMA states are the first 3 entries in the tuple
            output_attentions (`bool`, default `False`):
                Whether to return self-attention weights
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `past_key_values` as prior state, and returns the
                updated states for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden
              states from target sequence updated by Mega's self-attention
            - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape
              `(batch_size, 1, sequence_length, sequence_length)` -- The self-attention weights corresponding to how
              each token in the input sequence attends to every other token
            - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the
              next step of incremental decoding
            - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of
              incremental decoding
            - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental
              decoding.
        """
        seq_len, bsz, embed_dim = input.size()
        if embed_dim != self.config.hidden_size:
            raise ValueError(f'Input embedding dimension should be {self.config.hidden_size}; received {embed_dim}')
        residual = input
        if self.config.normalize_before_mega:
            input = self.norm(input)
        value = self.activation(self.v_proj(input))
        if self.config.is_decoder and past_key_values is not None:
            if seq_len > 1:
                raise ValueError(f'Incremental decoding only supports self sequence length of 1; received {seq_len}')
            prev_self_key, prev_self_value, prev_ema_state = past_key_values[0:3]
        else:
            prev_self_key = prev_self_value = prev_ema_state = None
        ema_out, updated_ema_state = self.ema_gate(input, attention_mask=padding_mask, prev_state=prev_ema_state, use_cache=use_cache)
        ema_out = self.dropout(ema_out)
        base = self.mx_proj(ema_out)
        residual_weight, query_key_gates, intermediate_state = torch.split(base, [self.config.hidden_size, self.config.shared_representation_size + self.config.intermediate_size, self.config.hidden_size], dim=-1)
        residual_weight = torch.sigmoid(residual_weight)
        query_key_gates = F.silu(query_key_gates)
        query_key, attention_gate = torch.split(query_key_gates, [self.config.shared_representation_size, self.config.intermediate_size], dim=-1)
        query_key = query_key.unsqueeze(2) * self.qk_weight + self.qk_bias
        query, key = torch.unbind(query_key, dim=2)
        query = query.transpose(0, 1)
        key = key.transpose(0, 1)
        value = value.transpose(0, 1)
        if self.config.is_decoder:
            if prev_self_key is not None:
                key = torch.cat([prev_self_key, key], dim=1)
            if prev_self_value is not None:
                value = torch.cat([prev_self_value, value], dim=1)
            if not self.config.use_chunking:
                updated_self_key = key
                updated_self_value = value
            else:
                curr_len = key.size(1) % self.config.chunk_size
                if curr_len == 0:
                    updated_self_key = None
                    updated_self_value = None
                else:
                    updated_self_key = key
                    updated_self_value = value
        ctx_len = key.size(1)
        if not self.config.use_chunking:
            query = query.unsqueeze(1)
            key = key.unsqueeze(1)
            value = value.unsqueeze(1)
            if padding_mask is not None:
                padding_mask = padding_mask.unsqueeze(1)
        else:
            if seq_len < self.config.chunk_size:
                query = query.unsqueeze(1)
            else:
                n_chunks = seq_len // self.config.chunk_size
                query = query.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size)
            if ctx_len < self.config.chunk_size:
                key = key.unsqueeze(1)
                value = value.unsqueeze(1)
                if padding_mask is not None:
                    padding_mask = padding_mask.unsqueeze(1)
            else:
                n_chunks = ctx_len // self.config.chunk_size
                key = key.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size)
                value = value.reshape(bsz, n_chunks, self.config.chunk_size, self.config.intermediate_size)
                if padding_mask is not None:
                    padding_mask = padding_mask.view(bsz, n_chunks, self.config.chunk_size)
        if padding_mask is not None and padding_mask.dim() == 0:
            padding_mask = None
        attn_weights = self.attention_function(query, key, padding_mask=padding_mask, causal_mask=causal_mask)
        value = self.hidden_dropout(value, batch_first=True)
        kernel = self.attention_dropout(attn_weights)
        weighted_self_output = torch.matmul(kernel, value).view(bsz, seq_len, self.config.intermediate_size).transpose(0, 1)
        weighted_self_output = self.activation(intermediate_state + self.h_proj(weighted_self_output * attention_gate))
        weighted_self_output = self.dropout(weighted_self_output)
        out = torch.addcmul(residual, residual_weight, weighted_self_output - residual)
        if not self.config.normalize_before_mega:
            out = self.norm(out)
        return_values = (out, attn_weights) if output_attentions else (out,)
        if self.config.is_decoder:
            return_values = return_values + (updated_self_key, updated_self_value, updated_ema_state)
        return return_values
class MegaMovingAverageGatedAttention(nn.Module):
    '''
    Pure PyTorch implementation of Mega block; see https://huggingface.co/papers/2209.10655 and original fairseq
    implementation at https://github.com/facebookresearch/mega (copyright Meta Research, licensed under MIT License)

    Differences from original implementation include hidden state refactor and fixed inconsistency with additive /
    multiplicative attention masks
    '''

    def __init__(self, config: MegaConfig):
        pass

    def element_attention(self, query, key, padding_mask, causal_mask):
        '''
        Apply element-wise attention via relu^2 or laplace. Same as original implementation but with standardized
        causal attention mask. Expects the Hugging Face standard attention mask paradigm: 1 for not masked, and 0 for
        masked.
        '''
        pass

    def softmax_attention(self, query, key, padding_mask, causal_mask):
        '''Standard softmax self-attention, as in the original Transformer paper'''
        pass

    def forward(self, input, padding_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions=False, use_cache=False):
        '''
        Mega's self-attention block, which combines multi-headed EMA with traditional self-attention

        Args:
            input (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):
                Hidden states to be updated by Mega's self-attention
            padding_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
                or 0 for *masked*
            causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for
                *not masked* or 0 for *masked*
            past_key_values (`tuple(torch.Tensor)`, *optional*):
                The hidden states returned from the previous timestep during incremental decoding; expects that
                self-attention key, value, and EMA states are the first 3 entries in the tuple
            output_attentions (`bool`, default `False`):
                Whether to return self-attention weights
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `past_key_values` as prior state, and returns the
                updated states for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden
              states from target sequence updated by Mega's self-attention
            - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape
              `(batch_size, 1, sequence_length, sequence_length)` -- The self-attention weights corresponding to how
              each token in the input sequence attends to every other token
            - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the
              next step of incremental decoding
            - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of
              incremental decoding
            - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental
              decoding.
        '''
        pass
5
4
81
11
45
25
9
0.6
1
11
6
0
4
16
4
14
334
49
179
59
166
107
137
51
132
20
1
3
37
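The `element_attention` path above swaps softmax for an element-wise activation (squared ReLU or laplace) over length-normalized scores. A minimal sketch of the squared-ReLU variant on toy tensors (shapes and names are illustrative, not part of the class above):

```python
import torch

def relu2(x):
    # squared ReLU: one of Mega's element-wise attention activations
    return torch.square(torch.relu(x))

# (batch, chunks, seq_len, shared_representation_size)
query = torch.randn(1, 1, 4, 8)
key = torch.randn(1, 1, 4, 8)

# scores are divided by the context length rather than sqrt(d)
qk = torch.matmul(query, key.transpose(2, 3)) / key.size(2)
attn_weights = relu2(qk)  # element-wise; rows need not sum to 1
print(attn_weights.shape)  # torch.Size([1, 1, 4, 4])
```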
1,822
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaMultiDimensionDampedEma
import math
from torch import nn
import torch.nn.functional as F
from .configuration_mega import MegaConfig
import torch
from typing import Optional, Union


class MegaMultiDimensionDampedEma(nn.Module):
    """
    Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of
    variable names and moving away from the stateful representation of incremental decoding state. See
    "https://huggingface.co/papers/2209.10655" for more details.
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.ndim = config.ema_projection_size
        self.bidirectional = config.bidirectional
        self.truncation = config.truncation
        self.scale = math.sqrt(1.0 / self.ndim)
        kernel_dim = 2 * config.hidden_size if self.bidirectional else config.hidden_size
        self.damping_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1))
        self.decay_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1))
        self.ema_expansion_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1))
        self.kernel_projection_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim))
        self.residual_weight = nn.Parameter(torch.Tensor(config.hidden_size))
        self._kernel = None
        self._coeffs = None

    def _compute_ema_coefficients(self):
        self._coeffs = None
        damping_factor = torch.sigmoid(self.damping_factor)
        decay_factor = torch.sigmoid(self.decay_factor)
        previous_timestep_weight = 1.0 - damping_factor * decay_factor
        return (damping_factor, previous_timestep_weight)

    def _compute_efficient_ema_kernel(self, length: int):
        self._kernel = None
        damping_factor, previous_timestep_weight = self._compute_ema_coefficients()
        vander = torch.arange(length).to(damping_factor).view(1, 1, length) * torch.log(previous_timestep_weight)
        kernel = damping_factor * self.ema_expansion_matrix * torch.exp(vander)
        return torch.einsum('dnl,dn->dl', kernel, self.kernel_projection_matrix * self.scale)

    def get_ema_coefficients(self):
        if self.training:
            return self._compute_ema_coefficients()
        else:
            if self._coeffs is None:
                self._coeffs = self._compute_ema_coefficients()
            return self._coeffs

    def get_ema_kernel(self, length: int):
        kernel_size = length if self.truncation is None else min(self.truncation, length)
        if self.training:
            return self._compute_efficient_ema_kernel(kernel_size)
        else:
            if self._kernel is None or self._kernel.size(-1) < kernel_size:
                self._kernel = self._compute_efficient_ema_kernel(kernel_size)
            return self._kernel[..., :kernel_size]

    def fft_convolution(self, inputs, kernel, length):
        inputs_fft = torch.fft.rfft(inputs.float(), n=2 * length)
        kernel_fft = torch.fft.rfft(kernel.float(), n=2 * length)
        convolved_sequence = torch.fft.irfft(inputs_fft * kernel_fft, n=2 * length)
        return convolved_sequence

    def ema_step(self, inputs, length, past_state=None):
        if length == 1:
            return self.one_ema_step(inputs, past_state=past_state)
        damping_factor, previous_timestep_weight = self.get_ema_coefficients()
        vander = torch.arange(length + 1).to(damping_factor).view(1, 1, length + 1) * torch.log(previous_timestep_weight)
        vander = torch.exp(vander)
        if past_state is not None:
            past_ema_proj = vander[:, :, 1:] * (self.kernel_projection_matrix * self.scale).unsqueeze(-1)
            past_ema_state = torch.einsum('bdn,dnl->bdl', past_state, past_ema_proj)
            past_vandermonde = vander[:, :, -1] * past_state
        else:
            past_ema_state = None
            past_vandermonde = None
        vander = vander[:, :, :-1]
        kernel = damping_factor * self.ema_expansion_matrix * vander
        kernel_proj = torch.einsum('dnl,dn->dl', kernel, self.kernel_projection_matrix * self.scale)
        ema_output = self.fft_convolution(inputs, kernel_proj, length=length)[..., 0:length]
        ema_output = ema_output.type_as(inputs)
        if past_ema_state is not None:
            ema_output = ema_output + past_ema_state
        updated_hidden_state = torch.einsum('bdl,dnl->bdn', inputs, torch.flip(kernel, dims=[2]))
        if past_vandermonde is not None:
            updated_hidden_state = updated_hidden_state + past_vandermonde
        return (ema_output.permute(2, 0, 1), updated_hidden_state)

    def one_ema_step(self, inputs, past_state=None):
        damping_factor, previous_timestep_weight = self.get_ema_coefficients()
        updated_state = (damping_factor * self.ema_expansion_matrix).squeeze(-1) * inputs
        if past_state is not None:
            updated_state = updated_state + previous_timestep_weight.squeeze(-1) * past_state
        out = torch.einsum('bdn,dn->bd', updated_state, self.kernel_projection_matrix * self.scale)
        return (out.unsqueeze(0), updated_state)

    def forward(self, inputs, attention_mask: Optional[torch.Tensor]=None, prev_state: Optional[torch.Tensor]=None, use_cache: bool=False) -> torch.Tensor:
        """
        Mega's exponential moving average (EMA) sub-layer applied prior to single-headed (traditional) self-attention

        Args:
            inputs (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):
                Hidden state / embedding input to update via EMA based on FFT convolution
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored (mostly due to padding), where elements are either 1 for
                *not masked* or 0 for *masked*
            prev_state (`torch.Tensor` of shape `(batch_size, config.ndim)`, *optional*):
                The hidden state returned from the previous timestep during incremental decoding.
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `prev_state` as the prior timestep, and returns the
                updated EMA hidden state for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden
              states updated by EMA, with same shapes as inputs
            - **updated_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental
              decoding
        """
        seq_len, bsz, embed_dim = inputs.size()
        if embed_dim != self.embed_dim:
            raise ValueError(f'Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}')
        residual = inputs * self.residual_weight
        inputs = inputs.permute(1, 2, 0)
        if attention_mask is not None:
            inputs = inputs * attention_mask.unsqueeze(1).type_as(inputs)
        if self.bidirectional and use_cache:
            raise RuntimeError('Bidirectional EMA does not support incremental state')
        if use_cache:
            out, updated_state = self.ema_step(inputs, seq_len, past_state=prev_state)
            out = F.silu(out + residual)
            return (out, updated_state)
        else:
            kernel = self.get_ema_kernel(seq_len)
            fft_len = seq_len
            s_index = 0
            kernel_size = kernel.size(1)
            if self.bidirectional:
                k1, k2 = torch.split(kernel, [self.embed_dim, self.embed_dim], dim=0)
                kernel = F.pad(k1, (kernel_size - 1, 0)) + F.pad(k2.flip(-1), (0, kernel_size - 1))
                inputs = F.pad(inputs, (kernel_size - 1, 0))
                fft_len = fft_len + kernel_size - 1
                s_index = 2 * kernel_size - 2
            ema_output = self.fft_convolution(inputs, kernel, length=fft_len)[..., s_index:s_index + seq_len]
            ema_output = ema_output.type_as(inputs)
            gated_ema_output = F.silu(ema_output.permute(2, 0, 1) + residual)
            return (gated_ema_output, None)
class MegaMultiDimensionDampedEma(nn.Module):
    '''
    Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of
    variable names and moving away from the stateful representation of incremental decoding state. See
    "https://huggingface.co/papers/2209.10655" for more details.
    '''

    def __init__(self, config: MegaConfig):
        pass

    def _compute_ema_coefficients(self):
        pass

    def _compute_efficient_ema_kernel(self, length: int):
        pass

    def get_ema_coefficients(self):
        pass

    def get_ema_kernel(self, length: int):
        pass

    def fft_convolution(self, inputs, kernel, length):
        pass

    def ema_step(self, inputs, length, past_state=None):
        pass

    def one_ema_step(self, inputs, past_state=None):
        pass

    def forward(self, inputs, attention_mask: Optional[torch.Tensor]=None, prev_state: Optional[torch.Tensor]=None, use_cache: bool=False) -> torch.Tensor:
        '''
        Mega's exponential moving average (EMA) sub-layer applied prior to single-headed (traditional) self-attention

        Args:
            inputs (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):
                Hidden state / embedding input to update via EMA based on FFT convolution
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored (mostly due to padding), where elements are either 1 for
                *not masked* or 0 for *masked*
            prev_state (`torch.Tensor` of shape `(batch_size, config.ndim)`, *optional*):
                The hidden state returned from the previous timestep during incremental decoding.
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `prev_state` as the prior timestep, and returns the
                updated EMA hidden state for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden
              states updated by EMA, with same shapes as inputs
            - **updated_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental
              decoding
        '''
        pass
10
2
21
2
13
6
3
0.51
1
7
1
0
9
13
9
19
207
27
119
62
103
61
105
56
95
6
1
2
25
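The FFT path above works because a damped EMA is a linear convolution: `fft_convolution` zero-pads to `2 * length` so the circular FFT product equals the linear convolution. A self-contained check of that equivalence for a scalar EMA (the `a` and `d` values are arbitrary, chosen only for illustration):

```python
import torch

# A scalar damped EMA y_t = a*x_t + (1 - a*d)*y_{t-1} is a convolution of x
# with the kernel k_t = a * (1 - a*d)**t, which is what Mega evaluates via FFT.
a, d, length = 0.3, 0.9, 16
x = torch.randn(length)
kernel = a * (1 - a * d) ** torch.arange(length, dtype=x.dtype)

# FFT convolution with zero padding to 2*length, mirroring fft_convolution above
x_f = torch.fft.rfft(x, n=2 * length)
k_f = torch.fft.rfft(kernel, n=2 * length)
y_fft = torch.fft.irfft(x_f * k_f, n=2 * length)[:length]

# naive recurrence for comparison
y = torch.zeros(length)
state = 0.0
for t in range(length):
    state = a * x[t] + (1 - a * d) * state
    y[t] = state

print(torch.allclose(y, y_fft, atol=1e-5))  # True
```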
1,823
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaNormalizedFeedForwardNetwork
from ....activations import ACT2FN
from torch import nn
from .configuration_mega import MegaConfig


class MegaNormalizedFeedForwardNetwork(nn.Module):
    """
    Normalized feed-forward network used in Mega blocks. Left as-is from original Mega repo aside from retrieving
    args from Hugging Face config
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.hidden_dim = config.nffn_hidden_size
        self.act_fn = config.activation
        self.activation = ACT2FN[config.activation]
        self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.hidden_dropout = MegaDropout(self.config.nffn_activation_dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.prenorm = self.config.normalize_before_ffn
        self.norm = MegaSequenceNorm(self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine)
        self.fc1 = nn.Linear(self.config.hidden_size, self.config.nffn_hidden_size)
        self.fc2 = nn.Linear(self.config.nffn_hidden_size, self.config.hidden_size)

    def forward(self, inputs):
        residual = inputs
        if self.prenorm:
            inputs = self.norm(inputs)
        hidden = self.activation(self.fc1(inputs))
        hidden = self.hidden_dropout(hidden)
        output = self.fc2(hidden)
        output = self.dropout(output)
        output = output + residual
        if not self.prenorm:
            output = self.norm(output)
        return output
class MegaNormalizedFeedForwardNetwork(nn.Module):
    '''
    Normalized feed-forward network used in Mega blocks. Left as-is from original Mega repo aside from retrieving
    args from Hugging Face config
    '''

    def __init__(self, config: MegaConfig):
        pass

    def forward(self, inputs):
        pass
3
1
18
4
14
0
2
0.14
1
4
3
0
2
10
2
12
43
10
29
16
26
4
25
16
22
3
1
1
4
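`MegaNormalizedFeedForwardNetwork` chooses between the two residual orderings with `config.normalize_before_ffn`. A minimal sketch of the difference (modules and sizes are illustrative):

```python
import torch
from torch import nn

norm, fc1, fc2 = nn.LayerNorm(8), nn.Linear(8, 16), nn.Linear(16, 8)
x = torch.randn(4, 8)

# pre-norm: normalize before the FFN, add the raw residual afterwards
out_prenorm = x + fc2(torch.relu(fc1(norm(x))))

# post-norm: run the FFN on the raw input, normalize the residual sum
out_postnorm = norm(x + fc2(torch.relu(fc1(x))))
print(out_prenorm.shape, out_postnorm.shape)  # both torch.Size([4, 8])
```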
1,824
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaPooler
import torch
import torch.nn.functional as F
from torch import nn


class MegaPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class MegaPooler(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
1,825
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaPreTrainedModel
from torch import nn
import torch.nn.functional as F
from .configuration_mega import MegaConfig
from ....modeling_utils import PreTrainedModel
import torch


class MegaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config: MegaConfig
    base_model_prefix = 'mega'
    supports_gradient_checkpointing = False
    _no_split_modules = ['MegaMovingAverageGatedAttention']

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, MegaMultiDimensionDampedEma):
            with torch.no_grad():
                nn.init.normal_(module.damping_factor, mean=0.0, std=self.config.ema_delta_alpha_range)
                nn.init.normal_(module.decay_factor, mean=0.0, std=self.config.ema_delta_alpha_range)
                val = torch.ones(self.config.ema_projection_size, 1)
                if self.config.ema_projection_size > 1:
                    idx = torch.tensor(list(range(1, self.config.ema_projection_size, 2)))
                    val.index_fill_(0, idx, -1.0)
                module.ema_expansion_matrix.normal_(mean=0.0, std=self.config.ema_beta_range).add_(val)
                nn.init.normal_(module.kernel_projection_matrix, mean=0.0, std=self.config.ema_gamma_omega_range)
                nn.init.normal_(module.residual_weight, mean=0.0, std=self.config.ema_gamma_omega_range)
        elif isinstance(module, MegaSimpleRelativePositionalBias):
            nn.init.normal_(module.rel_pos_bias, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, MegaRotaryRelativePositionalBias):
            nn.init.normal_(module.alpha, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.b_param, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, MegaScaleNorm):
            if self.config.norm_affine:
                nn.init.constant_(module.scalar, 1.0)
        elif isinstance(module, MegaRMSNorm):
            if self.config.norm_affine:
                nn.init.constant_(module.weight, 1.0)
        elif isinstance(module, MegaMovingAverageGatedAttention):
            nn.init.normal_(module.qk_weight, mean=0.0, std=self.config.initializer_range)
            nn.init.constant_(module.qk_bias, 0.0)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class MegaPreTrainedModel(PreTrainedModel):
    '''
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    '''

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
2
2
43
0
37
6
15
0.24
1
8
6
7
1
0
1
130
54
2
42
8
40
10
34
8
32
15
2
3
15
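The `_init_weights` branch for the EMA module above centers `ema_expansion_matrix` around alternating ±1 values via `index_fill_`. The trick in isolation (the size is arbitrary):

```python
import torch

size = 6
val = torch.ones(size, 1)
idx = torch.tensor(list(range(1, size, 2)))  # odd rows
val.index_fill_(0, idx, -1.0)                # flip their sign in place
print(val.squeeze(-1))  # tensor([ 1., -1.,  1., -1.,  1., -1.])
# noise of std ema_beta_range is then added around these values via .normal_().add_(val)
```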
1,826
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaRMSNorm
import torch.nn.functional as F
from torch import nn
import torch


class MegaRMSNorm(nn.Module):
    """
    RMSNorm used in Mega implementation. Differs from T5's RMSNorm by applying the weight prior to taking the square
    root (as opposed to after in T5)
    """

    def __init__(self, number_features, eps=1e-06, affine=True):
        super().__init__()
        self.num_features = number_features
        self.eps = eps
        self.affine = affine
        if affine:
            self.weight = nn.Parameter(torch.Tensor(self.num_features))
        else:
            self.register_parameter('weight', None)

    def forward(self, input):
        mean_square = torch.mean(torch.square(input), dim=-1, keepdim=True)
        if self.weight is not None:
            input = input * self.weight
        # assign the normalized result (the original expression computed it but discarded it)
        input = input * torch.rsqrt(mean_square + self.eps)
        return input

    def extra_repr(self):
        return f'{self.num_features}, eps={self.eps}, affine={self.affine}'
class MegaRMSNorm(nn.Module):
    '''
    RMSNorm used in Mega implementation. Differs from T5's RMSNorm by applying the weight prior to taking the square
    root (as opposed to after in T5)
    '''

    def __init__(self, number_features, eps=1e-06, affine=True):
        pass

    def forward(self, input):
        pass

    def extra_repr(self):
        pass
4
1
6
0
6
0
2
0.22
1
2
0
0
3
4
3
13
26
4
18
9
14
4
17
9
13
2
1
1
5
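A functional sketch of what the (fixed) `MegaRMSNorm.forward` computes; note the mean square is taken over the unscaled input, so with a unit weight the output has roughly unit RMS per row:

```python
import torch

def rms_norm(x, weight, eps=1e-06):
    # y = (x * weight) * rsqrt(mean(x**2) + eps), matching the fixed forward above
    mean_square = torch.mean(torch.square(x), dim=-1, keepdim=True)
    return x * weight * torch.rsqrt(mean_square + eps)

x = torch.randn(2, 4)
y = rms_norm(x, torch.ones(4))
print(torch.mean(torch.square(y), dim=-1))  # ~1.0 per row (up to eps)
```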
1,827
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaRotaryRelativePositionalBias
import math
from torch import nn
import torch.nn.functional as F
from .configuration_mega import MegaConfig
import torch


class MegaRotaryRelativePositionalBias(nn.Module):
    """
    Rotary relative bias for positional information; similar in concept to RoPE (i.e. RoFormer) but taken from the
    Mega repo due to differences in implementation.

    When initialized, produces a positional bias which ranges from position 0 to config.max_positions, but can
    extrapolate to longer sequences. Can be indexed according to input position IDs
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        if config.hidden_size % 2 != 0:
            raise RuntimeError('Rotary positional bias requires `hidden_size` to be a multiple of 2')
        self.config = config
        self.embed_dim = config.shared_representation_size
        self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size
        self.sine, self.cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(config.max_positions, self.embed_dim)
        self.alpha = nn.Parameter(torch.Tensor(1, self.embed_dim))
        self.b_param = nn.Parameter(torch.Tensor(1, self.embed_dim))
        self.register_buffer('_float_tensor', torch.FloatTensor([0.0]))

    @staticmethod
    def get_sinusoid_embeddings(max_positions: int, embedding_dim: int):
        half_dim = embedding_dim // 2
        emb = math.log(10000) / half_dim
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        return (torch.sin(emb), torch.cos(emb))

    def rotary(self, input):
        seq_len, embed_dim = input.size()
        chunk_1, chunk_2 = torch.chunk(input, 2, dim=-1)
        if self.sine is None or seq_len > self.sine.size(0):
            self.sine, self.cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(seq_len, embed_dim)
            self.max_positions = seq_len
        self.sine = self.sine.to(self._float_tensor)
        self.cosine = self.cosine.to(self._float_tensor)
        sin = self.sine[:seq_len]
        cos = self.cosine[:seq_len]
        return torch.cat([chunk_1 * cos - chunk_2 * sin, chunk_2 * cos + chunk_1 * sin], dim=1)

    def forward(self, seq_len):
        rotary_alpha = self.rotary(self.alpha.expand(seq_len, self.embed_dim))
        rotary_beta = self.rotary(self.b_param.expand(seq_len, self.embed_dim))
        bias = torch.einsum('mk,nk->mn', rotary_alpha, rotary_beta)
        return bias
class MegaRotaryRelativePositionalBias(nn.Module):
    '''
    Rotary relative bias for positional information; similar in concept to RoPE (i.e. RoFormer) but taken from the
    Mega repo due to differences in implementation.

    When initialized, produces a positional bias which ranges from position 0 to config.max_positions, but can
    extrapolate to longer sequences. Can be indexed according to input position IDs
    '''

    def __init__(self, config: MegaConfig):
        pass

    @staticmethod
    def get_sinusoid_embeddings(max_positions: int, embedding_dim: int):
        pass

    def rotary(self, input):
        pass

    def forward(self, seq_len):
        pass
6
1
10
0
9
1
2
0.22
1
6
1
0
3
7
4
14
51
6
37
21
31
8
34
20
29
3
1
1
7
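The rotary bias above is *relative* because rotating `alpha` and `b_param` to positions `m` and `n` and taking inner products yields a bias that depends only on `m - n` (the per-pair rotations compose into a rotation by the angle difference). A quick check; the helpers below mirror `get_sinusoid_embeddings` and `rotary`, with illustrative names:

```python
import math
import torch

def sinusoid(max_pos, half_dim):
    freq = torch.exp(torch.arange(half_dim).float() * (-math.log(10000) / half_dim))
    angles = torch.arange(max_pos).float().unsqueeze(1) * freq.unsqueeze(0)
    return torch.sin(angles), torch.cos(angles)

def rotate(x, sin, cos):
    chunk_1, chunk_2 = torch.chunk(x, 2, dim=-1)
    return torch.cat([chunk_1 * cos - chunk_2 * sin, chunk_2 * cos + chunk_1 * sin], dim=-1)

seq, dim = 5, 8
sin, cos = sinusoid(seq, dim // 2)
alpha = torch.randn(1, dim).expand(seq, dim)
beta = torch.randn(1, dim).expand(seq, dim)
bias = torch.einsum('mk,nk->mn', rotate(alpha, sin, cos), rotate(beta, sin, cos))
# constant along diagonals: bias[m, n] is a function of m - n only
print(torch.allclose(bias[0, 1], bias[2, 3], atol=1e-5))  # True
```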
1,828
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaScaleNorm
import torch
import torch.nn.functional as F
from torch import nn


class MegaScaleNorm(nn.Module):
    """
    Scale normalization introduced in MEGA which is similar to RMSNorm, but uses a single parameter for scalar
    multiplication instead of a vector, and applies over a specified dimension
    """

    def __init__(self, dim, eps=1e-06, affine=True):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.affine = affine
        if affine:
            self.scalar = nn.Parameter(torch.Tensor(1))
        else:
            self.register_parameter('scalar', None)

    def forward(self, input):
        mean_square = torch.mean(torch.square(input), dim=self.dim, keepdim=True)
        if self.scalar is not None:
            input = self.scalar * input
        output = input * torch.rsqrt(mean_square + self.eps)
        return output
class MegaScaleNorm(nn.Module):
    '''
    Scale normalization introduced in MEGA which is similar to RMSNorm, but uses a single parameter for scalar
    multiplication instead of a vector, and applies over a specified dimension
    '''

    def __init__(self, dim, eps=1e-06, affine=True):
        pass

    def forward(self, input):
        pass
3
1
8
1
8
0
2
0.25
1
2
0
0
2
4
2
12
23
3
16
9
13
4
15
9
12
2
1
1
4
1,829
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaSequenceNorm
from torch import nn


class MegaSequenceNorm(nn.Module):
    """
    A wrapper class for various layer normalization options used in Mega. Used to handle differences in expectations
    on input axis locations for different normalization methods.
    """

    def __init__(self, norm_type, embedding_dim, eps=1e-05, affine=True, export=False):
        super().__init__()
        if norm_type == 'layernorm':
            self.norm = nn.LayerNorm(embedding_dim, eps, elementwise_affine=affine)
        elif norm_type == 'scalenorm':
            self.norm = MegaScaleNorm(dim=-1, eps=eps, affine=affine)
        elif norm_type == 'rmsnorm':
            self.norm = MegaRMSNorm(embedding_dim, eps=eps, affine=affine)
        elif norm_type == 'batchnorm':
            self.norm = nn.BatchNorm1d(embedding_dim, eps=eps, affine=affine)
        elif norm_type == 'syncbatchnorm':
            self.norm = nn.SyncBatchNorm(embedding_dim, eps=eps, affine=affine)
        else:
            raise ValueError(f'Unknown norm type: {norm_type}')

    def forward(self, input):
        if isinstance(self.norm, nn.modules.batchnorm._BatchNorm):
            if input.dim() != 3:
                raise ValueError('BatchNorm inputs must be exactly 3-dimensional')
            input = input.permute(1, 2, 0)
            input = self.norm(input)
            return input.permute(2, 0, 1)
        else:
            return self.norm(input)
class MegaSequenceNorm(nn.Module):
    '''
    A wrapper class for various layer normalization options used in Mega. Used to handle differences in expectations
    on input axis locations for different normalization methods.
    '''

    def __init__(self, norm_type, embedding_dim, eps=1e-05, affine=True, export=False):
        pass

    def forward(self, input):
        pass
3
1
12
0
12
0
5
0.17
1
4
2
0
2
1
2
12
30
2
24
4
21
4
18
4
15
6
1
2
9
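The permutes in `MegaSequenceNorm.forward` exist because BatchNorm normalizes over channels in `(batch, channels, seq)` layout while Mega's activations are `(seq, batch, hidden)`. In isolation (sizes are arbitrary):

```python
import torch
from torch import nn

# BatchNorm1d expects (batch, channels, seq); Mega's inputs are (seq, batch, hidden),
# hence permute(1, 2, 0) before the norm and permute(2, 0, 1) after.
x = torch.randn(10, 2, 8)            # (seq, batch, hidden)
bn = nn.BatchNorm1d(8)
y = bn(x.permute(1, 2, 0)).permute(2, 0, 1)
print(y.shape)                       # torch.Size([10, 2, 8])
```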
1,830
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mega/modeling_mega.py
transformers.models.deprecated.mega.modeling_mega.MegaSimpleRelativePositionalBias
import torch
import torch.nn.functional as F
from torch import nn
from .configuration_mega import MegaConfig


class MegaSimpleRelativePositionalBias(nn.Module):
    """
    Simple relative positional embeddings copied from the Mega repo; renamed variables for better readability
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size
        self.rel_pos_bias = nn.Parameter(torch.Tensor(2 * config.max_positions - 1))

    def forward(self, seq_len):
        if seq_len > self.max_positions:
            raise ValueError(f'Sequence length {seq_len} going beyond max length {self.max_positions}')
        bias = self.rel_pos_bias[self.max_positions - seq_len:self.max_positions + seq_len - 1]
        tile = F.pad(bias, (0, seq_len))
        tile = torch.tile(tile, (seq_len,))
        tile = tile[:-seq_len]
        tile = tile.view(seq_len, 3 * seq_len - 2)
        start = (2 * seq_len - 1) // 2
        end = tile.size(1) - start
        tile = tile[:, start:end]
        return tile
class MegaSimpleRelativePositionalBias(nn.Module):
    '''
    Simple relative positional embeddings copied from the Mega repo; renamed variables for better readability
    '''

    def __init__(self, config: MegaConfig):
        pass

    def forward(self, seq_len):
        pass
3
1
11
1
9
2
2
0.39
1
4
1
0
2
3
2
12
28
3
18
10
15
7
18
10
15
2
1
1
4
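The pad/tile/slice sequence above is a skewing trick that turns a 1-D bias over relative positions into a `(seq_len, seq_len)` matrix with entry `[i, j]` equal to `rel_pos_bias[max_positions - 1 + (j - i)]`. Seeding the vector with its own relative positions makes that visible:

```python
import torch
import torch.nn.functional as F

max_pos, seq = 8, 4
# make the bias vector equal to its own relative position: b[k] = k - (max_pos - 1)
b = torch.arange(2 * max_pos - 1).float() - (max_pos - 1)

bias = b[max_pos - seq:max_pos + seq - 1]        # relative positions -(seq-1)..(seq-1)
tile = F.pad(bias, (0, seq))
tile = torch.tile(tile, (seq,))[:-seq].view(seq, 3 * seq - 2)
start = (2 * seq - 1) // 2
bias_matrix = tile[:, start:tile.size(1) - start]
print(bias_matrix)  # bias_matrix[i, j] == j - i
```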
1,831
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mmbt/configuration_mmbt.py
transformers.models.deprecated.mmbt.configuration_mmbt.MMBTConfig
class MMBTConfig:
    """
    This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
    model according to the specified arguments, defining the model architecture.

    Args:
        config ([`PreTrainedConfig`]):
            Config of the underlying Transformer models. Its values are copied over to use a single config.
        num_labels (`int`, *optional*):
            Size of final Linear layer for classification.
        modal_hidden_size (`int`, *optional*, defaults to 2048):
            Embedding dimension of the non-text modality encoder.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
class MMBTConfig:
    '''
    This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
    model according to the specified arguments, defining the model architecture.

    Args:
        config ([`PreTrainedConfig`]):
            Config of the underlying Transformer models. Its values are copied over to use a single config.
        num_labels (`int`, *optional*):
            Size of final Linear layer for classification.
        modal_hidden_size (`int`, *optional*, defaults to 2048):
            Embedding dimension of the non-text modality encoder.
    '''

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        pass
2
1
5
0
5
0
2
1.83
0
0
0
0
1
3
1
1
19
2
6
5
4
11
6
5
4
2
0
1
2
1,832
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mmbt/modeling_mmbt.py
transformers.models.deprecated.mmbt.modeling_mmbt.MMBTForClassification
from torch.nn import CrossEntropyLoss, MSELoss
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings


@add_start_docstrings(
    '\n    MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n    ',
    MMBT_START_DOCSTRING,
    MMBT_INPUTS_DOCSTRING,
)
class MMBTForClassification(nn.Module):
    """
    **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
        Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
        config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
        `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

    Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**:
    (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or
    regression if config.num_labels==1) loss. **logits**: `torch.FloatTensor` of shape `(batch_size,
    config.num_labels)` Classification (or regression if config.num_labels==1) scores (before SoftMax).
    **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for
    the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:
    Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**:
    (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape
    `(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used
    to compute the weighted average in the self-attention heads.

    Examples:

    ```python
    # For example purposes. Not runnable.
    transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
    encoder = ImageEncoder(args)
    model = MMBTForClassification(config, transformer, encoder)
    outputs = model(input_modal, input_ids, labels=labels)
    loss, logits = outputs[:2]
    ```"""

    def __init__(self, config, transformer, encoder):
        super().__init__()
        # keep a reference to the config; forward() reads self.config.use_return_dict,
        # which was an AttributeError in the original since self.config was never set
        self.config = config
        self.num_labels = config.num_labels
        self.mmbt = MMBTModel(config, transformer, encoder)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mmbt(input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings(
    '\n    MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n    ',
    MMBT_START_DOCSTRING,
    MMBT_INPUTS_DOCSTRING,
)
class MMBTForClassification(nn.Module):
    '''
    **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
        Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
        config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
        `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

    Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**:
    (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or
    regression if config.num_labels==1) loss. **logits**: `torch.FloatTensor` of shape `(batch_size,
    config.num_labels)` Classification (or regression if config.num_labels==1) scores (before SoftMax).
    **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for
    the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:
    Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**:
    (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape
    `(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used
    to compute the weighted average in the self-attention heads.

    Examples:

    ```python
    # For example purposes. Not runnable.
    transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
    encoder = ImageEncoder(args)
    model = MMBTForClassification(config, transformer, encoder)
    outputs = model(input_modal, input_ids, labels=labels)
    loss, logits = outputs[:2]
    ```'''

    def __init__(self, config, transformer, encoder):
        pass

    def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None):
        pass
4
1
33
4
29
1
4
0.45
1
3
2
0
2
4
2
12
96
12
58
28
40
26
24
13
21
6
1
2
7
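The `num_labels` switch above is the usual regression-vs-classification convention: a single output unit means MSE on a squeezed logit, otherwise cross-entropy over classes. In isolation (tensor values are arbitrary):

```python
import torch
from torch.nn import CrossEntropyLoss, MSELoss

logits = torch.randn(3, 5)            # (batch, num_labels) with num_labels > 1
labels = torch.tensor([1, 0, 4])
cls_loss = CrossEntropyLoss()(logits.view(-1, 5), labels.view(-1))

reg_logits = torch.randn(3, 1)        # num_labels == 1 -> regression
reg_labels = torch.randn(3)
reg_loss = MSELoss()(reg_logits.view(-1), reg_labels.view(-1))
print(cls_loss.item(), reg_loss.item())
```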
1,833
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mmbt/modeling_mmbt.py
transformers.models.deprecated.mmbt.modeling_mmbt.MMBTModel
from ....modeling_utils import ModuleUtilsMixin
from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
from torch import nn
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch


@add_start_docstrings('The bare MMBT Model outputting raw hidden-states without any specific head on top.', MMBT_START_DOCSTRING)
class MMBTModel(nn.Module, ModuleUtilsMixin):

    def __init__(self, config, transformer, encoder):
        super().__init__()
        self.config = config
        self.transformer = transformer
        self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)

    @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """
        Returns:

        Examples:

        ```python
        # For example purposes. Not runnable.
        transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
        encoder = ImageEncoder(args)
        mmbt = MMBTModel(config, transformer, encoder)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_txt_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_txt_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        modal_embeddings = self.modal_encoder(input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids)
        input_modal_shape = modal_embeddings.size()[:-1]
        if token_type_ids is None:
            token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
        txt_embeddings = self.transformer.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
        input_shape = embedding_output.size()[:-1]
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        else:
            attention_mask = torch.cat([torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        else:
            encoder_attention_mask = torch.cat([torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        encoder_outputs = self.transformer.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        pooled_output = self.transformer.pooler(sequence_output)
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def get_input_embeddings(self):
        # the word embeddings live on the wrapped transformer; the original returned
        # self.embeddings.word_embeddings, an attribute MMBTModel never defines
        return self.transformer.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.transformer.embeddings.word_embeddings = value
@add_start_docstrings('The bare MMBT Model outputting raw hidden-states without any specific head on top.', MMBT_START_DOCSTRING)
class MMBTModel(nn.Module, ModuleUtilsMixin):

    def __init__(self, config, transformer, encoder):
        pass

    @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        '''
        Returns:

        Examples:

        ```python
        # For example purposes. Not runnable.
        transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
        encoder = ImageEncoder(args)
        mmbt = MMBTModel(config, transformer, encoder)
        ```'''
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass
8
1
29
4
23
2
4
0.09
2
4
2
0
4
3
4
28
123
19
95
39
70
9
42
20
37
12
1
1
15
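Because `MMBTModel` prepends the modal embeddings to the text embeddings, the text attention mask is extended with ones over the modal positions before the extended mask is built. A toy illustration (sizes are arbitrary):

```python
import torch

batch, modal_len, txt_len = 2, 5, 7
txt_mask = torch.ones(batch, txt_len, dtype=torch.long)
txt_mask[1, 4:] = 0  # some padding in the second example

# modal tokens are always attended to, so they get a mask of ones
full_mask = torch.cat([torch.ones(batch, modal_len, dtype=torch.long), txt_mask], dim=1)
print(full_mask.shape)  # torch.Size([2, 12]) == (batch, modal_len + txt_len)
```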
1,834
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mmbt/modeling_mmbt.py
transformers.models.deprecated.mmbt.modeling_mmbt.ModalEmbeddings
import torch
from torch import nn


class ModalEmbeddings(nn.Module):
    """Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""

    def __init__(self, config, encoder, embeddings):
        super().__init__()
        self.config = config
        self.encoder = encoder
        self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
        self.position_embeddings = embeddings.position_embeddings
        self.token_type_embeddings = embeddings.token_type_embeddings
        self.word_embeddings = embeddings.word_embeddings
        self.LayerNorm = embeddings.LayerNorm
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

    def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
        token_embeddings = self.proj_embeddings(self.encoder(input_modal))
        seq_length = token_embeddings.size(1)
        if start_token is not None:
            start_token_embeds = self.word_embeddings(start_token)
            seq_length += 1
            token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
        if end_token is not None:
            end_token_embeds = self.word_embeddings(end_token)
            seq_length += 1
            token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
            position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
        if token_type_ids is None:
            token_type_ids = torch.zeros((input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = token_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class ModalEmbeddings(nn.Module):
    '''Generic Modal Embeddings which takes in an encoder, and a transformer embedding.'''

    def __init__(self, config, encoder, embeddings):
        pass

    def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
        pass
3
1
20
3
17
0
3
0.03
1
1
0
0
2
8
2
12
43
7
35
18
32
1
33
18
30
5
1
1
6
1,835
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/configuration_nat.py
transformers.models.deprecated.nat.configuration_nat.NatConfig
from ....configuration_utils import PretrainedConfig
from ....utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`NatModel`]. It is used to instantiate a Nat
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Nat
    [shi-labs/nat-mini-in1k-224](https://huggingface.co/shi-labs/nat-mini-in1k-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 64):
            Dimensionality of patch embedding.
        depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
            Number of layers in each level of the encoder.
        num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
            Number of attention heads in each layer of the Transformer encoder.
        kernel_size (`int`, *optional*, defaults to 7):
            Neighborhood Attention kernel size.
        mlp_ratio (`float`, *optional*, defaults to 3.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not a learnable bias should be added to the queries, keys and values.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 0.0):
            The initial value for the layer scale. Disabled if <=0.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined
            in the `stage_names` attribute.

    Example:

    ```python
    >>> from transformers import NatConfig, NatModel

    >>> # Initializing a Nat shi-labs/nat-mini-in1k-224 style configuration
    >>> configuration = NatConfig()

    >>> # Initializing a model (with random weights) from the shi-labs/nat-mini-in1k-224 style configuration
    >>> model = NatModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'nat'
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-05, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class NatConfig(BackboneConfigMixin, PretrainedConfig): ''' This is the configuration class to store the configuration of a [`NatModel`]. It is used to instantiate a Nat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nat [shi-labs/nat-mini-in1k-224](https://huggingface.co/shi-labs/nat-mini-in1k-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 64): Dimensionality of patch embedding. depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`): Number of layers in each level of the encoder. num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`): Number of attention heads in each layer of the Transformer encoder. kernel_size (`int`, *optional*, defaults to 7): Neighborhood Attention kernel size. mlp_ratio (`float`, *optional*, defaults to 3.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 0.0): The initial value for the layer scale. Disabled if <=0. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. 
Example: ```python >>> from transformers import NatConfig, NatModel >>> # Initializing a Nat shi-labs/nat-mini-in1k-224 style configuration >>> configuration = NatConfig() >>> # Initializing a model (with random weights) from the shi-labs/nat-mini-in1k-224 style configuration >>> model = NatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-05, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs): pass
2
1
46
1
43
2
1
1.24
2
3
0
0
1
20
1
38
121
11
49
43
27
61
24
23
22
1
2
0
1
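A quick check of the derived fields computed in `NatConfig.__init__` above; this is a plain-Python restatement of the arithmetic shown in the record, using the documented defaults (`embed_dim=64`, `depths=[3, 4, 6, 5]`), with no assumptions beyond the code itself:

```python
# Plain-Python restatement of the derived fields in NatConfig.__init__ above,
# using the documented defaults.
embed_dim = 64
depths = [3, 4, 6, 5]

num_layers = len(depths)                               # 4 encoder levels
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # 64 * 2**3 = 512
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]

print(num_layers)   # 4
print(hidden_size)  # 512
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```

This is why `hidden_size` is 512 for the default four-level model even though it is never passed explicitly.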
1,836
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatBackbone
from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends import torch from torch import nn from typing import Optional, Union from ....utils.backbone_utils import BackboneMixin from ....modeling_outputs import BackboneOutput @add_start_docstrings('NAT backbone, to be used with frameworks like DETR and MaskFormer.', NAT_START_DOCSTRING) class NatBackbone(NatPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) requires_backends(self, ['natten']) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))] hidden_states_norms = {} for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224") >>> model = AutoBackbone.from_pretrained( ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"] ... 
) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 512, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=True) hidden_states = outputs.reshaped_hidden_states feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: batch_size, num_channels, height, width = hidden_state.shape hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
@add_start_docstrings('NAT backbone, to be used with frameworks like DETR and MaskFormer.', NAT_START_DOCSTRING) class NatBackbone(NatPreTrainedModel, BackboneMixin): def __init__(self, config): pass def get_input_embeddings(self): pass @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: ''' Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224") >>> model = AutoBackbone.from_pretrained( ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 512, 7, 7] ```''' pass
7
1
32
6
18
8
4
0.4
2
9
3
0
3
4
3
145
100
19
58
24
46
23
38
17
34
9
3
2
12
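The loop in `NatBackbone.forward` above normalizes each selected stage in channels-last layout before returning channels-first feature maps. A minimal sketch of that round-trip on a dummy tensor (the shapes are illustrative, not taken from a checkpoint):

```python
# Sketch of the per-stage normalization round-trip in NatBackbone.forward above.
# Dummy shapes; the real tensors come out of NatEncoder.
import torch
from torch import nn

batch_size, num_channels, height, width = 1, 128, 14, 14
hidden_state = torch.randn(batch_size, num_channels, height, width)
norm = nn.LayerNorm(num_channels)  # one such norm per selected stage

x = hidden_state.permute(0, 2, 3, 1).contiguous()     # (B, H, W, C)
x = x.view(batch_size, height * width, num_channels)  # (B, H*W, C)
x = norm(x)                                           # normalize over channels
x = x.view(batch_size, height, width, num_channels)
x = x.permute(0, 3, 1, 2).contiguous()                # back to (B, C, H, W)
print(x.shape)  # torch.Size([1, 128, 14, 14])
```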
1,837
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatDownsampler
import torch from torch import nn class NatDownsampler(nn.Module): """ Convolutional Downsampling Layer. Args: dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature
class NatDownsampler(nn.Module): ''' Convolutional Downsampling Layer. Args: dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. ''' def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: pass def forward(self, input_feature: torch.Tensor) -> torch.Tensor: pass
3
1
5
0
5
0
1
0.8
1
3
0
0
2
3
2
12
21
3
10
6
7
8
10
6
7
1
1
0
2
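A shape check for `NatDownsampler` above: the stride-2 convolution halves the spatial resolution and doubles the channel count, with the permutes converting between the channels-last layout used by the model and the channels-first layout expected by `nn.Conv2d`. The sizes below are illustrative:

```python
# Shape check for NatDownsampler above: spatial size halves, channels double.
import torch
from torch import nn

dim = 64
reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
norm = nn.LayerNorm(2 * dim)

input_feature = torch.randn(1, 56, 56, dim)  # channels-last, as in the model
out = reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
out = norm(out)
print(out.shape)  # torch.Size([1, 28, 28, 128])
```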
1,838
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatDropPath
from typing import Optional, Union from torch import nn import torch class NatDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f'p={self.drop_prob}'
class NatDropPath(nn.Module): '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).''' def __init__(self, drop_prob: Optional[float]=None) -> None: pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass def extra_repr(self) -> str: pass
4
1
2
0
2
0
1
0.13
1
4
0
0
3
1
3
13
12
3
8
5
4
1
8
5
4
1
1
0
3
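The `drop_path` helper called in `NatDropPath.forward` is defined elsewhere in `modeling_nat.py` and is not part of this excerpt. The sketch below assumes the standard stochastic-depth formulation (a per-sample binary mask rescaled by the keep probability), which is what such helpers conventionally implement:

```python
# Sketch of the standard stochastic-depth formulation assumed for the
# drop_path helper: a per-sample binary mask, rescaled by the keep probability.
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return hidden_states  # identity at inference time or with p=0
    keep_prob = 1.0 - drop_prob
    # one mask entry per sample, broadcast over all remaining dimensions
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    mask = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    mask.floor_()  # binarize: 1 with probability keep_prob, else 0
    return hidden_states.div(keep_prob) * mask
```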
1,839
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatEmbeddings
from torch import nn from typing import Optional, Union import torch class NatEmbeddings(nn.Module): """ Construct the patch embeddings. Nat uses no absolute position embeddings; positional information comes from the relative position biases in attention. """ def __init__(self, config): super().__init__() self.patch_embeddings = NatPatchEmbeddings(config) self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]: embeddings = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class NatEmbeddings(nn.Module): ''' Construct the patch embeddings. Nat uses no absolute position embeddings; positional information comes from the relative position biases in attention. ''' def __init__(self, config): pass def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]: pass
3
1
7
2
5
0
1
0.27
1
3
1
0
2
3
2
12
20
6
11
7
8
3
11
7
8
1
1
0
2
1,840
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatEncoder
from torch import nn import torch from typing import Optional, Union class NatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')] self.levels = nn.ModuleList([NatStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=NatDownsampler if i_layer < self.num_levels - 1 else None) for i_layer in range(self.num_levels)]) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, NatEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] if output_hidden_states and output_hidden_states_before_downsampling: reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return NatEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states)
class NatEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, NatEncoderOutput]: pass
3
0
33
4
28
2
6
0.05
1
10
3
0
2
3
2
12
67
8
56
21
46
3
31
14
28
10
1
2
12
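The `dpr` list built in `NatEncoder.__init__` above spreads the stochastic-depth rate linearly over all layers, and each level receives its contiguous slice. Evaluated with the default `depths=[3, 4, 6, 5]` and `drop_path_rate=0.1`:

```python
# The linear stochastic-depth schedule from NatEncoder.__init__ above,
# evaluated with the default depths [3, 4, 6, 5] and drop_path_rate 0.1.
import torch

depths = [3, 4, 6, 5]
drop_path_rate = 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

for i_layer in range(len(depths)):
    level_rates = dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])]
    print(i_layer, [round(r, 3) for r in level_rates])
# Rates rise linearly from 0.0 (first layer) to 0.1 (last of the 18 layers).
```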
1,841
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatEncoderOutput
import torch from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends from typing import Optional, Union from dataclasses import dataclass @dataclass class NatEncoderOutput(ModelOutput): """ Nat encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class NatEncoderOutput(ModelOutput): ''' Nat encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. ''' pass
2
1
0
0
0
0
0
4
1
0
0
0
0
0
0
0
30
5
5
5
4
20
5
5
4
0
1
0
0
1,842
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatForImageClassification
from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends import torch from torch import nn from typing import Optional, Union @add_start_docstrings('\n    Nat Model transformer with an image classification head on top (a linear layer on top of the pooled final hidden\n    states) e.g. for ImageNet.\n    ', NAT_START_DOCSTRING) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ['natten']) self.num_labels = config.num_labels self.nat = NatModel(config) self.classifier = nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, NatImageClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nat(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: loss = self.loss_function(labels, logits, self.config) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return NatImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
@add_start_docstrings('\n    Nat Model transformer with an image classification head on top (a linear layer on top of the pooled final hidden\n    states) e.g. for ImageNet.\n    ', NAT_START_DOCSTRING) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, NatImageClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
6
1
38
6
29
4
7
0.12
1
5
2
0
2
3
2
132
85
12
65
20
48
8
33
12
30
12
3
3
14
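For symmetry with the `NatBackbone` docstring example above, here is a hedged usage sketch for `NatForImageClassification`. It assumes a `transformers` version that still ships the (now deprecated) Nat classes, the `natten` backend, network access to the COCO test image, and the `shi-labs/nat-mini-in1k-224` checkpoint referenced elsewhere in this file:

```python
# Hypothetical end-to-end usage; mirrors the NatBackbone docstring example.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, NatForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
model = NatForImageClassification.from_pretrained("shi-labs/nat-mini-in1k-224")

with torch.no_grad():
    logits = model(**processor(image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])
```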
1,843
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatImageClassifierOutput
from dataclasses import dataclass import torch from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends from typing import Optional, Union @dataclass class NatImageClassifierOutput(ModelOutput): """ Nat outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class NatImageClassifierOutput(ModelOutput): ''' Nat outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. ''' pass
2
1
0
0
0
0
0
3.67
1
0
0
0
0
0
0
0
33
5
6
6
5
22
6
6
5
0
1
0
0
1,844
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatIntermediate
import torch from torch import nn from ....activations import ACT2FN class NatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class NatIntermediate(nn.Module): def __init__(self, config, dim): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
4
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
1,845
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatLayer
from typing import Optional, Union import torch from torch import nn class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size) self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = NatIntermediate(config, dim) self.output = NatOutput(config, dim) self.layer_scale_parameters = nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None def maybe_pad(self, hidden_states, height, width): window_size = self.kernel_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return (hidden_states, pad_values) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() if self.layer_scale_parameters is not None: attention_output = self.layer_scale_parameters[0] * attention_output hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.output(self.intermediate(layer_output)) if self.layer_scale_parameters is not None: layer_output = self.layer_scale_parameters[1] * layer_output layer_output = hidden_states + self.drop_path(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs
class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): pass def maybe_pad(self, hidden_states, height, width): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]: pass
4
0
21
4
17
0
3
0.02
1
7
4
0
3
9
3
13
65
13
51
31
43
1
43
27
39
5
1
1
10
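The padding rule in `NatLayer.maybe_pad` above only fires when an input is smaller than the attention kernel. Restated as plain arithmetic with the default `kernel_size=7`:

```python
# The padding amounts computed in NatLayer.maybe_pad above, as plain arithmetic.
def pad_amounts(height: int, width: int, window_size: int = 7):
    pad_r = max(0, window_size - width)   # right padding
    pad_b = max(0, window_size - height)  # bottom padding
    return pad_b, pad_r

print(pad_amounts(5, 9))    # (2, 0): height 5 padded up to the kernel size 7
print(pad_amounts(14, 14))  # (0, 0): large enough, maybe_pad is a no-op
```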
1,846
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatModel
from torch import nn from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends import torch from typing import Optional, Union @add_start_docstrings('The bare Nat Model transformer outputting raw hidden-states without any specific head on top.', NAT_START_DOCSTRING) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ['natten']) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, NatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return NatModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states)
@add_start_docstrings('The bare Nat Model transformer outputting raw hidden-states without any specific head on top.', NAT_START_DOCSTRING) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): pass def get_input_embeddings(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, NatModelOutput]: pass
8
1
18
3
13
1
3
0.08
1
7
3
0
4
7
4
134
83
16
62
25
43
5
35
18
30
7
3
1
12
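A shape walk-through of the pooling step at the end of `NatModel.forward` above: channels-last features `(batch, height, width, channels)` are flattened to a sequence, average-pooled, and squeezed to `(batch, channels)`. Dummy sizes are illustrative:

```python
# Pooling shapes from the end of NatModel.forward above, on a dummy tensor.
import torch
from torch import nn

sequence_output = torch.randn(2, 7, 7, 512)  # (B, H, W, C), channels-last
pooler = nn.AdaptiveAvgPool1d(1)

pooled_output = pooler(sequence_output.flatten(1, 2).transpose(1, 2))  # (B, C, 1)
pooled_output = torch.flatten(pooled_output, 1)
print(pooled_output.shape)  # torch.Size([2, 512])
```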
1,847
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatModelOutput
from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends import torch from dataclasses import dataclass from typing import Optional, Union @dataclass class NatModelOutput(ModelOutput): """ Nat model's outputs that also contain a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class NatModelOutput(ModelOutput): ''' Nat model's outputs that also contain a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. ''' pass
2
1
0
0
0
0
0
3.67
1
0
0
0
0
0
0
0
33
5
6
6
5
22
6
6
5
0
1
0
0
1,848
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatOutput
import torch from torch import nn class NatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class NatOutput(nn.Module): def __init__(self, config, dim): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
4
0
4
0
1
0
1
3
0
0
2
2
2
12
10
1
9
5
6
0
9
5
6
1
1
0
2
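`NatIntermediate` and `NatOutput` above form the usual expand/contract MLP pair; the inner width is `int(mlp_ratio * dim)`. With the `NatConfig` default `mlp_ratio=3.0` and an illustrative `dim=64`:

```python
# Inner width of the MLP pair formed by NatIntermediate and NatOutput above.
mlp_ratio, dim = 3.0, 64
inner = int(mlp_ratio * dim)
print(inner)  # 192: dense(64 -> 192) in NatIntermediate, dense(192 -> 64) in NatOutput
```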
1,849
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatPatchEmbeddings
from torch import nn from typing import Optional, Union import torch class NatPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() patch_size = config.patch_size num_channels, hidden_size = (config.num_channels, config.embed_dim) self.num_channels = num_channels if patch_size != 4: raise ValueError('Nat only supports a patch size of 4 at the moment.') self.projection = nn.Sequential(nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.') embeddings = self.projection(pixel_values) embeddings = embeddings.permute(0, 2, 3, 1) return embeddings
class NatPatchEmbeddings(nn.Module): ''' This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a Transformer. ''' def __init__(self, config): pass def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: pass
3
1
13
2
11
1
2
0.26
1
3
0
0
2
2
2
12
34
5
23
9
20
6
17
9
14
2
1
1
4
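The two convolutions in `NatPatchEmbeddings.projection` above each halve the spatial size, for an overall 4x reduction that matches the documented patch size of 4. Checked with the standard convolution output-size formula:

```python
# The 4x spatial reduction of NatPatchEmbeddings.projection above, checked with
# the standard convolution output-size formula.
def conv_out(size: int, kernel: int = 3, stride: int = 2, padding: int = 1) -> int:
    return (size + 2 * padding - kernel) // stride + 1

height = 224
after_first = conv_out(height)        # 112
after_second = conv_out(after_first)  # 56
print(after_first, after_second)      # 112 56 -> a 56 x 56 patch grid
```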
1,850
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatPreTrainedModel
from torch import nn from ....modeling_utils import PreTrainedModel from .configuration_nat import NatConfig class NatPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: NatConfig base_model_prefix = 'nat' main_input_name = 'pixel_values' def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class NatPreTrainedModel(PreTrainedModel): ''' An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. ''' def _init_weights(self, module): '''Initialize the weights''' pass
2
2
11
0
8
3
4
0.58
1
0
0
3
1
0
1
130
21
2
12
5
10
7
11
5
9
4
2
2
4
1,851
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NatStage
from torch import nn import torch from typing import Optional, Union class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList([NatLayer(config=config, dim=dim, num_heads=num_heads, drop_path_rate=drop_path_rate[i]) for i in range(depth)]) if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: _, height, width, _ = hidden_states.size() for i, layer_module in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: hidden_states = self.downsample(hidden_states_before_downsampling) stage_outputs = (hidden_states, hidden_states_before_downsampling) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs
class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
3
0
21
3
18
1
3
0.03
1
6
1
0
2
5
2
12
44
6
37
17
30
1
22
13
19
4
1
1
6
1,852
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NeighborhoodAttention
import torch import math from typing import Optional, Union from torch import nn class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() if dim % num_heads != 0: raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})') self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.kernel_size = kernel_size self.rpb = nn.Parameter(torch.zeros(num_heads, 2 * self.kernel_size - 1, 2 * self.kernel_size - 1)) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 3, 1, 2, 4) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = query_layer / math.sqrt(self.attention_head_size) attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): pass def transpose_for_scores(self, x): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
4
0
19
4
12
3
2
0.21
1
5
0
0
3
9
3
13
59
13
38
26
30
8
32
22
28
2
1
1
5
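Head-splitting and relative-position-bias bookkeeping from `NeighborhoodAttention.__init__` above, evaluated with an illustrative `dim=128`, `num_heads=4` and the default `kernel_size=7`:

```python
# Shape bookkeeping from NeighborhoodAttention.__init__ above.
dim, num_heads, kernel_size = 128, 4, 7  # illustrative dim/heads, default kernel

attention_head_size = dim // num_heads
rpb_shape = (num_heads, 2 * kernel_size - 1, 2 * kernel_size - 1)
print(attention_head_size)  # 32
print(rpb_shape)            # (4, 13, 13): one 13x13 relative-position-bias table per head
```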
1,853
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nat/modeling_nat.py
transformers.models.deprecated.nat.modeling_nat.NeighborhoodAttentionModule
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from typing import Optional, Union import torch from torch import nn class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
4
0
10
1
9
1
1
0.11
1
6
2
0
3
3
3
13
34
4
28
15
20
3
22
11
18
2
1
1
4
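The bookkeeping at the end of `prune_heads` above, restated as plain arithmetic with illustrative sizes: each pruned head removes one `attention_head_size` slice from `all_head_size`:

```python
# Head-count arithmetic from the end of prune_heads above, with dummy sizes.
num_attention_heads, attention_head_size = 4, 32
heads = {0, 2}  # heads selected for pruning

num_attention_heads -= len(heads)
all_head_size = attention_head_size * num_attention_heads
print(num_attention_heads, all_head_size)  # 2 64
```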
1,854
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/configuration_nezha.py
transformers.models.deprecated.nezha.configuration_nezha.NezhaConfig
from .... import PretrainedConfig class NezhaConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`NezhaModel`]. It is used to instantiate a Nezha model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nezha [sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, optional, defaults to 21128): Vocabulary size of the NEZHA model. Defines the number of different tokens that can be represented by the *input_ids* passed to the forward method of [`NezhaModel`]. hidden_size (`int`, optional, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, optional, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, optional, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, optional, defaults to 3072): The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, optional, defaults to "gelu"): The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob (`float`, optional, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, optional, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, optional, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large (e.g., 512 or 1024 or 2048). max_relative_position (`int`, optional, defaults to 64): The maximum relative distance that the relative position encoding in self-attention can represent. type_vocab_size (`int`, optional, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`NezhaModel`]. initializer_range (`float`, optional, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, optional, defaults to 1e-12): The epsilon used by the layer normalization layers. classifier_dropout (`float`, optional, defaults to 0.1): The dropout ratio for attached classifiers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. 
Example: ```python >>> from transformers import NezhaConfig, NezhaModel >>> # Initializing a Nezha configuration >>> configuration = NezhaConfig() >>> # Initializing a model (with random weights) from the Nezha-base style configuration model >>> model = NezhaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'nezha' def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_relative_position = max_relative_position self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout = classifier_dropout self.use_cache = use_cache
class NezhaConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`NezhaModel`]. It is used to instantiate a Nezha model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nezha [sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, optional, defaults to 21128): Vocabulary size of the NEZHA model. Defines the number of different tokens that can be represented by the *input_ids* passed to the forward method of [`NezhaModel`]. hidden_size (`int`, optional, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, optional, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, optional, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, optional, defaults to 3072): The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, optional, defaults to "gelu"): The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob (`float`, optional, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, optional, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, optional, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large (e.g., 512 or 1024 or 2048). max_relative_position (`int`, optional, defaults to 64): The maximum relative distance that the relative position encoding in self-attention can represent. type_vocab_size (`int`, optional, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`NezhaModel`]. initializer_range (`float`, optional, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, optional, defaults to 1e-12): The epsilon used by the layer normalization layers. classifier_dropout (`float`, optional, defaults to 0.1): The dropout ratio for attached classifiers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. Example: ```python >>> from transformers import NezhaConfig, NezhaModel >>> # Initializing a Nezha configuration >>> configuration = NezhaConfig() >>> # Initializing a model (with random weights) from the Nezha-base style configuration model >>> model = NezhaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs): pass
2
1
39
1
38
0
1
1.2
1
1
0
0
1
15
1
33
99
11
40
39
17
48
19
18
17
1
2
0
1
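Aside on the record above: a minimal usage sketch for the configuration it describes, assuming a transformers version that still exports Nezha (the model was later moved under `models/deprecated` and eventually removed).

```python
# Minimal sketch, assuming an older transformers install that still ships Nezha.
from transformers import NezhaConfig, NezhaModel

# Override a few architecture defaults; max_relative_position keeps its default of 64.
config = NezhaConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4, intermediate_size=512)
model = NezhaModel(config)
print(model.config.max_relative_position)  # 64
```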
1,855
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaAttention
from ....cache_utils import Cache from typing import Optional, Union from ....utils.deprecation import deprecate_kwarg from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from torch import nn import torch class NezhaAttention(nn.Module): def __init__(self, config): super().__init__() self.self = NezhaSelfAttention(config) self.output = NezhaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class NezhaAttention(nn.Module): def __init__(self, config): pass def prune_heads(self, heads): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
5
0
15
1
13
1
1
0.07
1
6
2
0
3
3
3
13
47
4
41
20
28
3
22
11
18
2
1
1
4
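Aside on the record above: `prune_heads` keeps only the weight rows that belong to surviving heads. A standalone toy version of that bookkeeping in plain torch (sizes are hypothetical, and this is not the library helper itself):

```python
import torch
from torch import nn

# Toy sketch: pruning head 1 of a 4-head projection keeps only the rows owned
# by the remaining heads, shrinking all_head_size from 8 to 6.
num_heads, head_size = 4, 2
query = nn.Linear(num_heads * head_size, num_heads * head_size)
keep = [h for h in range(num_heads) if h != 1]
index = torch.cat([torch.arange(h * head_size, (h + 1) * head_size) for h in keep])
print(query.weight[index].shape)  # torch.Size([6, 8])
```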
1,856
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaEmbeddings
from torch import nn import torch from typing import Optional, Union class NezhaEmbeddings(nn.Module): """Construct the embeddings from word and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('token_type_ids', torch.zeros((1, config.max_position_embeddings), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class NezhaEmbeddings(nn.Module): '''Construct the embeddings from word and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: pass
3
1
23
3
17
3
3
0.17
1
2
0
0
2
4
2
12
49
8
35
18
27
6
26
13
23
5
1
2
6
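Aside on the record above: unlike BERT, these embeddings sum only word and token-type vectors; position information enters later through relative-position terms inside self-attention. A self-contained toy replica with hypothetical sizes:

```python
import torch
from torch import nn

# Toy replica of the embedding sum above. Note the absence of a position
# embedding table: Nezha injects positions inside self-attention instead.
vocab, type_vocab, hidden = 100, 2, 16
word = nn.Embedding(vocab, hidden, padding_idx=0)
token_type = nn.Embedding(type_vocab, hidden)
norm, drop = nn.LayerNorm(hidden, eps=1e-12), nn.Dropout(0.1)

input_ids = torch.tensor([[5, 7, 9]])
embeddings = drop(norm(word(input_ids) + token_type(torch.zeros_like(input_ids))))
print(embeddings.shape)  # torch.Size([1, 3, 16])
```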
1,857
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaEncoder
from ....cache_utils import Cache from typing import Optional, Union from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch class NezhaEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([NezhaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values[i] if past_key_values is not None else None, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
class NezhaEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: pass
3
0
45
4
41
0
9
0
1
8
2
0
2
3
2
12
91
8
83
26
68
0
35
14
32
17
1
3
18
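Aside on the record above: the encoder loop records hidden states *before* each layer runs and once more after the loop, so the tuple holds the embedding output plus one entry per layer. A toy trace with plain-Python stand-ins for the layers:

```python
# Toy trace of the accumulation order in NezhaEncoder.forward.
layers = [lambda h: h + 1, lambda h: h * 10]  # stand-ins for NezhaLayer modules
hidden, all_hidden = 0, ()
for layer in layers:
    all_hidden += (hidden,)   # state as it enters the layer
    hidden = layer(hidden)
all_hidden += (hidden,)       # final state after the last layer
print(all_hidden)  # (0, 1, 10): embeddings output plus one entry per layer
```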
1,858
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForMaskedLM
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from typing import Optional, Union @add_start_docstrings('Nezha Model with a `language modeling` head on top.', NEZHA_START_DOCSTRING) class NezhaForMaskedLM(NezhaPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `NezhaForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.nezha = NezhaModel(config, add_pooling_layer=False) self.cls = NezhaOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] if self.config.pad_token_id is None: raise ValueError('The PAD token should be defined for generation') attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {'input_ids': input_ids, 'attention_mask': attention_mask}
@add_start_docstrings('Nezha Model with a `language modeling` head on top.', NEZHA_START_DOCSTRING) class NezhaForMaskedLM(NezhaPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` ''' pass def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): pass
9
1
18
2
14
2
2
0.12
1
7
3
0
5
2
5
135
101
17
76
32
51
9
36
18
30
5
3
1
11
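Aside on the record above: a quick self-contained check of the `-100` label convention described in the forward docstring; `CrossEntropyLoss` ignores index -100 by default, so only unmasked positions contribute to the MLM loss.

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(1, 4, vocab_size)  # (batch, seq, vocab)
labels = torch.tensor([[-100, 3, -100, 7]])        # only positions 1 and 3 are scored
loss = CrossEntropyLoss()(prediction_scores.view(-1, vocab_size), labels.view(-1))
print(loss)  # mean over the two unmasked tokens only
```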
1,859
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForMultipleChoice
from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch @add_start_docstrings('\n Nezha Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForMultipleChoice(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.nezha = NezhaModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n Nezha Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForMultipleChoice(NezhaPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
6
1
38
4
31
4
6
0.1
1
5
2
0
2
3
2
132
83
8
68
27
48
7
31
15
28
10
3
1
12
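Aside on the record above: the multiple-choice head folds the choice dimension into the batch, scores each (example, choice) pair once, then unfolds the logits before the loss. A toy shape walk-through with hypothetical sizes:

```python
import torch

batch, num_choices, seq = 2, 4, 8
input_ids = torch.randint(0, 100, (batch, num_choices, seq))
flat_ids = input_ids.view(-1, input_ids.size(-1))  # (8, 8): choices folded into batch
logits = torch.randn(flat_ids.size(0), 1)          # one score per (example, choice) pair
reshaped_logits = logits.view(-1, num_choices)     # (2, 4): ready for CrossEntropyLoss
print(flat_ids.shape, reshaped_logits.shape)
```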
1,860
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForNextSentencePrediction
from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import warnings from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch @add_start_docstrings('Nezha Model with a `next sentence prediction (classification)` head on top.', NEZHA_START_DOCSTRING) class NezhaForNextSentencePrediction(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.nezha = NezhaModel(config) self.cls = NezhaOnlyNSPHead(config) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], NextSentencePredictorOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Example: ```python >>> from transformers import AutoTokenizer, NezhaForNextSentencePrediction >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base") >>> model = NezhaForNextSentencePrediction.from_pretrained("sijunhe/nezha-cn-base") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> logits = outputs.logits >>> assert logits[0, 0] < logits[0, 1] # next sentence was random ``` """ if 'next_sentence_label' in kwargs: warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning) labels = kwargs.pop('next_sentence_label') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('Nezha Model with a `next sentence prediction (classification)` head on top.', NEZHA_START_DOCSTRING) class NezhaForNextSentencePrediction(NezhaPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], NextSentencePredictorOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Example: ```python >>> from transformers import AutoTokenizer, NezhaForNextSentencePrediction >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base") >>> model = NezhaForNextSentencePrediction.from_pretrained("sijunhe/nezha-cn-base") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> logits = outputs.logits >>> assert logits[0, 0] < logits[0, 1] # next sentence was random ``` ''' pass
6
1
45
9
26
11
4
0.41
1
7
3
0
2
2
2
132
94
18
54
24
37
22
22
11
19
6
3
1
7
1,861
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForPreTraining
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from typing import Optional, Union @add_start_docstrings('\n Nezha Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', NEZHA_START_DOCSTRING) class NezhaForPreTraining(NezhaPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) self.nezha = NezhaModel(config) self.cls = NezhaPreTrainingHeads(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NezhaForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], NezhaForPreTrainingOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`dict[str, any]`, optional, defaults to *{}*): Used to hide legacy arguments that have been deprecated. 
Returns: Example: ```python >>> from transformers import AutoTokenizer, NezhaForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base") >>> model = NezhaForPreTraining.from_pretrained("sijunhe/nezha-cn-base") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return NezhaForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n Nezha Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', NEZHA_START_DOCSTRING) class NezhaForPreTraining(NezhaPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NezhaForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], NezhaForPreTrainingOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`dict[str, any]`, optional, defaults to *{}*): Used to hide legacy arguments that have been deprecated. Returns: Example: ```python >>> from transformers import AutoTokenizer, NezhaForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base") >>> model = NezhaForPreTraining.from_pretrained("sijunhe/nezha-cn-base") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ``` ''' pass
8
1
23
4
13
7
2
0.46
1
6
3
0
4
2
4
134
100
18
56
29
37
26
27
16
22
5
3
1
8
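Aside on the record above: the pretraining objective is the plain sum of the MLM loss and the NSP loss, both computed with one shared `CrossEntropyLoss`. A toy check with random tensors standing in for model outputs:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, batch, seq = 10, 2, 4
prediction_scores = torch.randn(batch, seq, vocab_size)
mlm_labels = torch.randint(0, vocab_size, (batch, seq))
seq_relationship_score = torch.randn(batch, 2)
nsp_labels = torch.randint(0, 2, (batch,))

loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, vocab_size), mlm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), nsp_labels.view(-1))
print(masked_lm_loss + next_sentence_loss)  # the model's total_loss
```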
1,862
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForPreTrainingOutput
from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from dataclasses import dataclass import torch @dataclass class NezhaForPreTrainingOutput(ModelOutput): """ Output type of [`NezhaForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None seq_relationship_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass class NezhaForPreTrainingOutput(ModelOutput): ''' Output type of [`NezhaForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
31
4
6
6
5
21
6
6
5
0
1
0
0
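Aside on the record above: the dataclass leans on `ModelOutput`, which behaves as a dict, a tuple, and an attribute container at once, with `None` fields skipped when converting to a tuple (the `return_dict=False` path). A minimal sketch, assuming a transformers install for the `ModelOutput` base class:

```python
import torch
from dataclasses import dataclass
from typing import Optional
from transformers.utils import ModelOutput

@dataclass
class ToyOutput(ModelOutput):
    loss: Optional[torch.Tensor] = None
    logits: Optional[torch.Tensor] = None

out = ToyOutput(logits=torch.ones(2, 3))      # loss stays None
print(out.logits.shape, out["logits"].shape)  # attribute and dict-style access
print(out.to_tuple())                         # only non-None fields survive
```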
1,863
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForQuestionAnswering
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from typing import Optional, Union @add_start_docstrings('\n Nezha Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', NEZHA_START_DOCSTRING) class NezhaForQuestionAnswering(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nezha = NezhaModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: """ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence are not taken into account for computing the loss.
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n Nezha Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', NEZHA_START_DOCSTRING) class NezhaForQuestionAnswering(NezhaPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: ''' start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence are not taken into account for computing the loss. ''' pass
6
1
40
5
29
7
4
0.2
1
5
2
0
2
3
2
132
88
10
65
29
44
13
32
16
29
7
3
2
8
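Aside on the record above: the span head emits a `(batch, seq, 2)` tensor that is split into start and end logits, and target positions are clamped so out-of-sequence answers land on `ignore_index`. A toy shape walk-through:

```python
import torch

batch, seq = 2, 6
logits = torch.randn(batch, seq, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()  # (2, 6)
ignored_index = start_logits.size(1)                  # 6
start_positions = torch.tensor([3, 99]).clamp(0, ignored_index)
# tensor([3, 6]): 99 now equals ignore_index, so CrossEntropyLoss(ignore_index=6) skips it
print(start_positions)
```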
1,864
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForSequenceClassification
from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch @add_start_docstrings('\n Nezha Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForSequenceClassification(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.nezha = NezhaModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n Nezha Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForSequenceClassification(NezhaPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
6
1
41
4
34
4
7
0.09
1
6
2
0
2
5
2
132
90
9
74
27
54
7
36
15
33
12
3
3
14
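Aside on the record above: when `problem_type` is unset, the forward infers it from the label count and dtype. A standalone toy version of that dispatch (`infer_problem_type` is a hypothetical helper, not a library function):

```python
import torch

def infer_problem_type(num_labels, labels):
    # Mirrors the three-way branch in the forward pass above.
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"

print(infer_problem_type(1, torch.tensor([0.5])))              # regression (MSELoss)
print(infer_problem_type(3, torch.tensor([2])))                # single_label_classification (CrossEntropyLoss)
print(infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])))  # multi_label_classification (BCEWithLogitsLoss)
```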
1,865
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaForTokenClassification
import torch from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn @add_start_docstrings('\n Nezha Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForTokenClassification(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nezha = NezhaModel(config, add_pooling_layer=False) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n Nezha Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForTokenClassification(NezhaPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
6
1
31
4
25
3
4
0.09
1
5
2
0
2
4
2
132
70
9
56
26
36
5
23
14
20
5
3
1
7
1,866
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaIntermediate
from ....activations import ACT2FN from torch import nn import torch class NezhaIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class NezhaIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
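Aside on the record above: `config.hidden_act` may be a string looked up in `ACT2FN` or an already-constructed callable. A toy version of that dispatch, with `ACT2FN` stood in for by a plain dict rather than the real transformers activations table:

```python
import torch
from torch import nn

ACT2FN = {"gelu": nn.GELU(), "relu": nn.ReLU()}  # stand-in for transformers' ACT2FN
hidden_act = "gelu"                              # could also be a callable
intermediate_act_fn = ACT2FN[hidden_act] if isinstance(hidden_act, str) else hidden_act
print(intermediate_act_fn(torch.tensor([-1.0, 0.0, 1.0])))
```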
1,867
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaLMPredictionHead
from torch import nn import torch class NezhaLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = NezhaPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class NezhaLMPredictionHead(nn.Module): def __init__(self, config): pass def _tie_weights(self): pass def forward(self, hidden_states): pass
4
0
6
1
4
1
1
0.23
1
2
1
0
3
3
3
13
21
5
13
7
9
3
13
7
9
1
1
0
3
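Aside on the record above: the prediction head creates a bias-free decoder and then attaches a separately registered `Parameter` as its bias, so the bias survives when the decoder weight is re-tied to the input embeddings. A toy reproduction of the trick:

```python
import torch
from torch import nn

vocab, hidden = 10, 4
decoder = nn.Linear(hidden, vocab, bias=False)  # created without a bias...
bias = nn.Parameter(torch.zeros(vocab))
decoder.bias = bias                             # ...then handed a shared Parameter
print(decoder(torch.randn(1, hidden)).shape)    # torch.Size([1, 10])
```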
1,868
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaLayer
from ....utils.deprecation import deprecate_kwarg from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ....modeling_layers import GradientCheckpointingLayer import torch from ....cache_utils import Cache from typing import Optional, Union class NezhaLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = NezhaAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = NezhaAttention(config) self.intermediate = NezhaIntermediate(config) self.output = NezhaOutput(config) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: self_attn_past_key_value = past_key_values[:2] if past_key_values is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_values=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_values[-2:] if past_key_values is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class NezhaLayer(GradientCheckpointingLayer): def __init__(self, config): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass def feed_forward_chunk(self, attention_output): pass
5
0
27
2
23
2
4
0.1
1
7
3
0
3
8
3
13
84
9
70
32
57
7
41
23
37
7
1
2
11
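The `apply_chunking_to_forward` call in `NezhaLayer.forward` above trades memory for compute by slicing the sequence dimension before the feed-forward sub-block. A minimal, self-contained sketch of that chunking idea (not the transformers helper itself; the `chunk_size` value and toy MLP are illustrative assumptions):

```python
import torch

def chunked_feed_forward(ff, hidden_states, chunk_size, seq_len_dim=1):
    # Apply `ff` to slices along the sequence dimension and re-concatenate.
    # Equivalent to ff(hidden_states) when ff acts position-wise.
    if chunk_size == 0:  # chunking disabled
        return ff(hidden_states)
    chunks = hidden_states.split(chunk_size, dim=seq_len_dim)
    return torch.cat([ff(c) for c in chunks], dim=seq_len_dim)

ff = torch.nn.Sequential(torch.nn.Linear(8, 32), torch.nn.GELU(), torch.nn.Linear(32, 8))
x = torch.randn(2, 10, 8)
assert torch.allclose(chunked_feed_forward(ff, x, chunk_size=4), ff(x), atol=1e-6)
```

Because the feed-forward block acts on each position independently, chunking changes peak activation memory but not the result.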
1,869
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaModel
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ....cache_utils import Cache
from typing import Optional, Union

@add_start_docstrings('The bare Nezha Model transformer outputting raw hidden-states without any specific head on top.', NEZHA_START_DOCSTRING)
class NezhaModel(NezhaPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob
    Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
    pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = NezhaEmbeddings(config)
        self.encoder = NezhaEncoder(config)
        self.pooler = NezhaPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        """
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4
            tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
        if token_type_ids is None:
            if hasattr(self.embeddings, 'token_type_ids'):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
@add_start_docstrings('The bare Nezha Model transformer outputting raw hidden-states without any specific head on top.', NEZHA_START_DOCSTRING)
class NezhaModel(NezhaPreTrainedModel):
    '''
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob
    Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
    pass.
    '''

    def __init__(self, config, add_pooling_layer=True):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    def _prune_heads(self, heads_to_prune):
        '''
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        '''
        pass

    @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        '''
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4
            tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        '''
        pass
9
3
30
3
20
7
5
0.4
1
8
4
0
5
4
5
135
171
24
105
40
79
42
55
25
49
18
3
2
24
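`NezhaModel.forward` above converts the 2D padding mask into the additive 4D mask the encoder consumes via `get_extended_attention_mask`. A rough standalone sketch of that conversion for the encoder case (the library helper also handles dtypes and decoder causal masks; this simplification is an assumption for illustration):

```python
import torch

def extended_attention_mask(mask_2d, dtype=torch.float32):
    # (batch, seq) with 1 = keep, 0 = pad  ->  (batch, 1, 1, seq) additive mask
    mask = mask_2d[:, None, None, :].to(dtype)
    return (1.0 - mask) * torch.finfo(dtype).min

mask = torch.tensor([[1, 1, 1, 0]])
print(extended_attention_mask(mask))  # zeros for real tokens, a large negative value for padding
```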
1,870
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaOnlyMLMHead
from torch import nn
import torch

class NezhaOnlyMLMHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.predictions = NezhaLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores
class NezhaOnlyMLMHead(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        pass
3
0
3
0
3
0
1
0
1
3
1
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
1,871
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaOnlyNSPHead
from torch import nn

class NezhaOnlyNSPHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score
class NezhaOnlyNSPHead(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, pooled_output):
        pass
3
0
3
0
3
0
1
0
1
1
0
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
1,872
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaOutput
from torch import nn
import torch

class NezhaOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class NezhaOutput(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
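`NezhaOutput` (like `NezhaSelfOutput` further down) follows BERT's post-LayerNorm residual pattern: project, drop out, then normalize the sum with the residual. A compact illustration with toy sizes (the dimensions are illustrative, not the model's defaults):

```python
import torch
from torch import nn

dense = nn.Linear(16, 8)   # intermediate_size -> hidden_size
norm = nn.LayerNorm(8)
drop = nn.Dropout(0.1)

intermediate = torch.randn(2, 5, 16)  # output of the feed-forward expansion
residual = torch.randn(2, 5, 8)       # the block's input
out = norm(drop(dense(intermediate)) + residual)  # post-norm: normalize *after* the residual add
print(out.shape)  # torch.Size([2, 5, 8])
```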
1,873
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaPooler
from torch import nn
import torch

class NezhaPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class NezhaPooler(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
1,874
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaPreTrainedModel
from torch import nn
from ....modeling_utils import PreTrainedModel
from .configuration_nezha import NezhaConfig

class NezhaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config: NezhaConfig
    base_model_prefix = 'nezha'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class NezhaPreTrainedModel(PreTrainedModel):
    '''
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    '''

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
2
2
15
0
12
3
6
0.41
1
0
0
8
1
0
1
130
26
2
17
6
15
7
15
6
13
6
2
2
6
1,875
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaPreTrainingHeads
from torch import nn

class NezhaPreTrainingHeads(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.predictions = NezhaLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return (prediction_scores, seq_relationship_score)
class NezhaPreTrainingHeads(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, sequence_output, pooled_output):
        pass
3
0
4
0
4
0
1
0
1
2
1
0
2
2
2
12
10
1
9
7
6
0
9
7
6
1
1
0
2
1,876
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaPredictionHeadTransform
from torch import nn
from ....activations import ACT2FN
import torch

class NezhaPredictionHeadTransform(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
class NezhaPredictionHeadTransform(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
7
0
7
0
2
0
1
3
0
0
2
3
2
12
15
1
14
6
11
0
13
6
10
2
1
1
3
1,877
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaRelativePositionsEncoding
from torch import nn
import math
import torch

class NezhaRelativePositionsEncoding(nn.Module):
    """Implement the Functional Relative Position Encoding"""

    def __init__(self, length, depth, max_relative_position=127):
        super().__init__()
        vocab_size = max_relative_position * 2 + 1
        range_vec = torch.arange(length)
        range_mat = range_vec.repeat(length).view(length, length)
        distance_mat = range_mat - torch.t(range_mat)
        distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
        final_mat = distance_mat_clipped + max_relative_position
        embeddings_table = torch.zeros(vocab_size, depth)
        position = torch.arange(0, vocab_size, dtype=torch.int64).float().unsqueeze(1)
        div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
        embeddings_table[:, 0::2] = torch.sin(position * div_term)
        embeddings_table[:, 1::2] = torch.cos(position * div_term)
        flat_relative_positions_matrix = final_mat.view(-1)
        one_hot_relative_positions_matrix = torch.nn.functional.one_hot(flat_relative_positions_matrix, num_classes=vocab_size).float()
        positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table)
        my_shape = list(final_mat.size())
        my_shape.append(depth)
        positions_encoding = positions_encoding.view(my_shape)
        self.register_buffer('positions_encoding', positions_encoding, persistent=False)

    def forward(self, length):
        return self.positions_encoding[:length, :length, :]
class NezhaRelativePositionsEncoding(nn.Module):
    '''Implement the Functional Relative Position Encoding'''

    def __init__(self, length, depth, max_relative_position=127):
        pass

    def forward(self, length):
        pass
3
1
13
1
12
0
1
0.04
1
2
0
0
2
0
2
12
30
4
25
16
22
1
23
16
20
1
1
0
2
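The buffer built in `NezhaRelativePositionsEncoding.__init__` above is a sinusoidal table indexed by clipped pairwise distances. The core index computation can be checked in isolation (the tiny `length`/`max_relative_position` values are illustrative):

```python
import torch

length, max_relative_position = 5, 2
range_vec = torch.arange(length)
distance = range_vec[None, :] - range_vec[:, None]  # distance[i, j] = j - i
clipped = torch.clamp(distance, -max_relative_position, max_relative_position)
indices = clipped + max_relative_position           # shift into [0, 2 * max_relative_position]
print(indices)
# tensor([[2, 3, 4, 4, 4],
#         [1, 2, 3, 4, 4],
#         [0, 1, 2, 3, 4],
#         [0, 0, 1, 2, 3],
#         [0, 0, 0, 1, 2]])
```

Each entry then selects one row of the sinusoidal `embeddings_table`, giving the `(length, length, depth)` buffer the attention layer slices at runtime.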
1,878
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaSelfAttention
from torch import nn
import math
import torch
from ....cache_utils import Cache
from typing import Optional, Union
from ....utils.deprecation import deprecate_kwarg

class NezhaSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.relative_positions_encoding = NezhaRelativePositionsEncoding(length=config.max_position_embeddings, depth=self.attention_head_size, max_relative_position=config.max_relative_position)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_values is not None:
            key_layer = past_key_values[0]
            value_layer = past_key_values[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_values is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_values[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_values[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            past_key_values = (key_layer, value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
        relations_keys = self.relative_positions_encoding(to_seq_length)
        query_layer_t = query_layer.permute(2, 0, 1, 3)
        query_layer_r = query_layer_t.contiguous().view(from_seq_length, batch_size * num_attention_heads, self.attention_head_size)
        key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
        key_position_scores_r = key_position_scores.view(from_seq_length, batch_size, num_attention_heads, from_seq_length)
        key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
        attention_scores = attention_scores + key_position_scores_r_t
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        relations_values = self.relative_positions_encoding(to_seq_length)
        attention_probs_t = attention_probs.permute(2, 0, 1, 3)
        attentions_probs_r = attention_probs_t.contiguous().view(from_seq_length, batch_size * num_attention_heads, to_seq_length)
        value_position_scores = torch.matmul(attentions_probs_r, relations_values)
        value_position_scores_r = value_position_scores.view(from_seq_length, batch_size, num_attention_heads, self.attention_head_size)
        value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
        context_layer = context_layer + value_position_scores_r_t
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_values,)
        return outputs
class NezhaSelfAttention(nn.Module):

    def __init__(self, config):
        pass

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
        pass
5
0
43
6
31
6
4
0.18
1
6
1
0
3
9
3
13
133
21
95
46
82
17
68
37
64
9
1
1
12
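In `NezhaSelfAttention.forward` above, a query–relative-key term is added to the usual `QK^T` scores before scaling and softmax. The module's permute/view shuffle computes the same quantity as a single einsum against the `(to_seq, to_seq, head_dim)` table; a toy-shape sketch of the scoring (names and sizes are illustrative):

```python
import torch

b, h, q_len, d = 2, 4, 6, 8
q = torch.randn(b, h, q_len, d)
k = torch.randn(b, h, q_len, d)
rel_k = torch.randn(q_len, q_len, d)  # relations_keys[i, j] = encoding of clipped (j - i)

content_scores = torch.matmul(q, k.transpose(-1, -2))       # (b, h, q, q): query_i . key_j
position_scores = torch.einsum('bhid,ijd->bhij', q, rel_k)  # query_i . rel_k[i, j]
scores = (content_scores + position_scores) / d ** 0.5      # Nezha scales *after* the addition
print(scores.shape)  # torch.Size([2, 4, 6, 6])
```

The same trick is applied on the value side: attention probabilities are also contracted against the relative table and added to the context vectors.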
1,879
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/nezha/modeling_nezha.py
transformers.models.deprecated.nezha.modeling_nezha.NezhaSelfOutput
import torch
from torch import nn

class NezhaSelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class NezhaSelfOutput(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
1,880
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/configuration_open_llama.py
transformers.models.deprecated.open_llama.configuration_open_llama.OpenLlamaConfig
from ....configuration_utils import PretrainedConfig

class OpenLlamaConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an
    Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 100000):
            Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`OpenLlamaModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is
            an experimental feature, subject to breaking API changes in future versions.

    Example:

    ```python
    >>> from transformers import OpenLlamaModel, OpenLlamaConfig

    >>> # Initializing a Open-Llama open_llama-7b style configuration
    >>> configuration = OpenLlamaConfig()

    >>> # Initializing a model from the open_llama-7b style configuration
    >>> model = OpenLlamaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'open-llama'

    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_theta=10000.0, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Also accepts the historically misspelled `use_memorry_efficient_attention` kwarg for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop('use_memorry_efficient_attention', use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(f'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
class OpenLlamaConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an
    Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 100000):
            Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`OpenLlamaModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is
            an experimental feature, subject to breaking API changes in future versions.

    Example:

    ```python
    >>> from transformers import OpenLlamaModel, OpenLlamaConfig

    >>> # Initializing a Open-Llama open_llama-7b style configuration
    >>> configuration = OpenLlamaConfig()

    >>> # Initializing a model from the open_llama-7b style configuration
    >>> model = OpenLlamaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_theta=10000.0, rope_scaling=None, **kwargs):
        pass

    def _rope_scaling_validation(self):
        '''
        Validate the `rope_scaling` configuration.
        '''
        pass
3
2
36
1
34
2
3
0.81
1
4
0
0
2
17
2
34
138
13
69
47
42
56
33
23
30
5
2
1
6
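`_rope_scaling_validation` accepts only a two-field dict with a known strategy and a strictly float factor above 1. A standalone function mirroring its rules (rewritten outside the class for illustration; the simplified error messages are an assumption):

```python
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return  # scaling disabled
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError("`rope_scaling` must be a dictionary with two fields, `type` and `factor`")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("`rope_scaling`'s type field must be one of ['linear', 'dynamic']")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError("`rope_scaling`'s factor field must be a float > 1")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # ok
validate_rope_scaling(None)                               # ok
# validate_rope_scaling({"type": "linear", "factor": 2})  # would raise: 2 is an int, not a float
```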
1,881
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaAttention
from .configuration_open_llama import OpenLlamaConfig
import torch
from torch import nn
from ....cache_utils import Cache
import math
from typing import Optional, Union
from ....utils.deprecation import deprecate_kwarg

class OpenLlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: OpenLlamaConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.dropout_prob = config.attention_dropout_prob
        self.rope_theta = config.rope_theta
        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).')
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = OpenLlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta)
        else:
            scaling_type = self.config.rope_scaling['type']
            scaling_factor = self.config.rope_scaling['factor']
            if scaling_type == 'linear':
                self.rotary_emb = OpenLlamaLinearScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta)
            elif scaling_type == 'dynamic':
                self.rotary_emb = OpenLlamaDynamicNTKScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta)
            else:
                raise ValueError(f'Unknown RoPE scaling type {scaling_type}')

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()
        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        kv_seq_len = key_states.shape[-2]
        if past_key_values is not None:
            kv_seq_len += past_key_values[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        if past_key_values is not None:
            key_states = torch.cat([past_key_values[0], key_states], dim=2)
            value_states = torch.cat([past_key_values[1], value_states], dim=2)
        past_key_values = (key_states, value_states) if use_cache else None
        if self.config.use_memory_efficient_attention and xops is not None and self.training:
            attn_weights = None
            query_states = query_states.transpose(1, 2)
            key_states = key_states.transpose(1, 2)
            value_states = value_states.transpose(1, 2)
            attn_output = xops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask(), p=self.dropout_prob)
        else:
            attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
            if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
                raise ValueError(f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is {attn_weights.size()}')
            if attention_mask is not None:
                if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                    raise ValueError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}')
                attn_weights = attn_weights + attention_mask
                attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device))
            attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
            attn_output = torch.matmul(attn_weights, value_states)
            if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
                raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}')
            attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights, past_key_values)
class OpenLlamaAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, config: OpenLlamaConfig):
        pass

    def _init_rope(self):
        pass

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        pass
6
1
31
4
27
1
4
0.04
1
9
4
0
4
12
4
14
129
18
107
35
94
4
66
27
61
10
1
3
17
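The `apply_rotary_pos_emb` call in the attention forward above rotates each (first-half, second-half) pair of the head dimension by a position-dependent angle. A minimal sketch of the half-rotation convention this file uses (the cos/sin caches come from the rotary embedding modules; shapes here are toy values, and the standalone functions are illustrative rather than the module-level helpers):

```python
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary(q, cos, sin):
    # cos/sin: (seq, head_dim), broadcast over batch and head dimensions
    return q * cos + rotate_half(q) * sin

dim, seq = 8, 5
inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2).float() / dim)
t = torch.arange(seq).float()
freqs = torch.outer(t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)  # duplicate frequencies across both halves

q = torch.randn(1, 1, seq, dim)
q_rot = apply_rotary(q, emb.cos(), emb.sin())
print(q_rot.shape)  # torch.Size([1, 1, 5, 8])
```

Because the rotation is applied to queries and keys alike, their dot products depend only on relative position offsets.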
1,882
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaDecoderLayer
from ....cache_utils import Cache
from typing import Optional, Union
from .configuration_open_llama import OpenLlamaConfig
import torch
from ....modeling_layers import GradientCheckpointingLayer
from ....utils.deprecation import deprecate_kwarg

class OpenLlamaDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: OpenLlamaConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = OpenLlamaAttention(config=config)
        self.mlp = OpenLlamaMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, dropout_prob=config.hidden_dropout_prob)
        self.input_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)`
                where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
class OpenLlamaDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: OpenLlamaConfig):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)`
                where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        '''
        pass
4
1
33
4
21
8
2
0.35
1
7
4
0
2
5
2
12
67
9
43
19
32
15
23
11
20
3
1
1
4
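Unlike Nezha's post-norm blocks earlier in this file set, `OpenLlamaDecoderLayer` is pre-norm: it normalizes *before* each sub-layer and adds the raw residual back. With a minimal RMSNorm, the wiring looks roughly like this (the `nn.Linear` sub-layers are stand-ins for the real attention and gated MLP, and the sizes are toy values):

```python
import torch
from torch import nn

class RMSNorm(nn.Module):
    def __init__(self, dim, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.eps = eps

    def forward(self, x):
        # Scale by the root-mean-square of the features; no mean subtraction, no bias.
        variance = x.pow(2).mean(-1, keepdim=True)
        return self.weight * x * torch.rsqrt(variance + self.eps)

hidden = torch.randn(2, 5, 8)
norm1, norm2 = RMSNorm(8), RMSNorm(8)
attn, mlp = nn.Linear(8, 8), nn.Linear(8, 8)  # placeholders for the sub-layers

h = hidden + attn(norm1(hidden))  # pre-norm attention block
h = h + mlp(norm2(h))             # pre-norm feed-forward block
print(h.shape)  # torch.Size([2, 5, 8])
```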
1,883
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaDynamicNTKScalingRotaryEmbedding
import torch

class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
    """OpenLlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        if seq_len > self.max_position_embeddings:
            base = self.base * (self.scaling_factor * seq_len / self.max_position_embeddings - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / self.dim)
            self.register_buffer('inv_freq', inv_freq, persistent=False)
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
        freqs = torch.outer(t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
    '''OpenLlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla'''

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        pass

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        pass
3
1
10
2
8
1
2
0.12
1
1
0
0
2
3
2
15
24
5
17
10
14
2
15
10
12
2
2
1
3
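The dynamic NTK variant above only recomputes the RoPE base once the sequence outgrows `max_position_embeddings`, which stretches all frequencies smoothly instead of rescaling positions. Plugging sample numbers into the formula from `_set_cos_sin_cache` (the `dim`/`max_pos` choices are illustrative):

```python
# base' = base * (s * L / L_max - (s - 1)) ** (d / (d - 2))
base, dim = 10000.0, 128
max_pos, scaling_factor = 2048, 1.0
for seq_len in (2048, 4096, 8192):
    if seq_len > max_pos:
        new_base = base * (scaling_factor * seq_len / max_pos - (scaling_factor - 1)) ** (dim / (dim - 2))
    else:
        new_base = base  # within the trained window, nothing changes
    print(seq_len, round(new_base))
# 2048 -> 10000; 4096 -> ~20221; 8192 -> ~40890
```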
1,884
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaForCausalLM
from torch import nn
from typing import Optional, Union
from ....cache_utils import Cache
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.model = OpenLlamaModel(config)
        if config.shared_input_output_embedding:
            self.lm_head = None
        else:
            self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, CausalLMOutputWithPast]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, OpenLlamaForCausalLM

        >>> model = OpenLlamaForCausalLM.from_pretrained("openlm-research/open_llama_7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = outputs[0]
        if self.config.shared_input_output_embedding:
            logits = torch.einsum('blh,vh->blv', hidden_states.to(self.model.embed_tokens.weight.device), self.model.embed_tokens.weight)
        else:
            logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
        if past_key_values is not None:
            past_length = past_key_values.get_seq_length()
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                remove_prefix_length = input_ids.shape[1] - 1
            input_ids = input_ids[:, remove_prefix_length:]
        position_ids = kwargs.get('position_ids')
        if attention_mask is not None and position_ids is None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}
        model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask})
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),)
        return reordered_past
class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, CausalLMOutputWithPast]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, OpenLlamaForCausalLM

        >>> model = OpenLlamaForCausalLM.from_pretrained("openlm-research/open_llama_7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```'''
        pass

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
        pass

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        pass
8
1
16
2
11
3
2
0.25
1
6
2
0
9
2
10
140
172
27
116
43
88
29
66
27
55
8
3
2
24
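The loss in `OpenLlamaForCausalLM.forward` above is the standard next-token objective: logits are shifted left by one position against the labels before cross-entropy. The shift in isolation (toy vocabulary and lengths):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab, b, t = 11, 2, 6
logits = torch.randn(b, t, vocab)
labels = torch.randint(0, vocab, (b, t))

shift_logits = logits[..., :-1, :].contiguous()  # predictions for positions 0..t-2
shift_labels = labels[..., 1:].contiguous()      # targets are the *next* tokens
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
print(loss)  # scalar loss over b * (t - 1) predictions
```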
1,885
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaForSequenceClassification
from typing import Optional, Union
from ....cache_utils import Cache
import torch
from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from torch import nn

@add_start_docstrings('\n    The LLaMa Model transformer with a sequence classification head on top (linear layer).\n\n    [`OpenLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal\n    models (e.g. GPT-2) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    ', OPEN_LLAMA_START_DOCSTRING)
class OpenLlamaForSequenceClassification(OpenLlamaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = OpenLlamaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
        self.post_init()

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutputWithPast]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed
            (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)
        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        elif input_ids is not None:
            sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
            sequence_lengths = sequence_lengths % input_ids.shape[-1]
            sequence_lengths = sequence_lengths.to(logits.device)
        else:
            sequence_lengths = -1
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
null
5
1
25
2
21
2
5
0.09
1
7
2
0
4
3
4
134
106
11
87
30
69
8
50
17
45
16
3
3
19
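The pooling trick in `OpenLlamaForSequenceClassification.forward` above locates the last non-padding token per row. A standalone sketch of that exact computation on toy inputs (the tensor values here are invented for illustration):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 4, 0, 0, 0]])

# Same trick as in the forward above: the index of the first pad token,
# minus one, is the last real token; the modulo handles rows with no padding
# (argmax returns 0 there, so -1 wraps around to the final position).
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 1])
```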
1,886
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaLinearScalingRotaryEmbedding
import torch

class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
    """OpenLlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
        t = t / self.scaling_factor
        freqs = torch.outer(t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
    '''OpenLlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev'''

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        pass

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        pass
3
1
7
1
6
1
1
0.17
1
1
0
0
2
2
2
15
17
3
12
8
9
2
12
8
9
1
2
0
2
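The only change `OpenLlamaLinearScalingRotaryEmbedding` makes over the base class is dividing the position grid by `scaling_factor` before the outer product with `inv_freq`. A tiny sketch of that step in isolation (toy sizes invented for illustration):

```python
import torch

scaling_factor = 2.0
seq_len = 8

# Linear RoPE scaling simply compresses the position grid, so a model
# trained on max_position_embeddings positions can address seq_len
# positions that map back into the trained range.
t = torch.arange(seq_len, dtype=torch.float32)
t_scaled = t / scaling_factor
print(t_scaled)  # tensor([0.0, 0.5, 1.0, ..., 3.5])
```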
1,887
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaMLP
from torch import nn
from ....activations import ACT2FN

class OpenLlamaMLP(nn.Module):

    def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, dropout_prob: float):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, x):
        out = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return self.dropout(out)
class OpenLlamaMLP(nn.Module):

    def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, dropout_prob: float):
        pass

    def forward(self, x):
        pass
3
0
8
0
8
0
1
0
1
4
0
0
2
5
2
12
18
1
17
15
8
0
11
9
8
1
1
0
2
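`OpenLlamaMLP` is a gated feed-forward block: the activated gate branch multiplies the up branch before projecting back down. A runnable sketch with plain `nn.Linear` layers; the `SiLU` activation is an assumption standing in for `ACT2FN[hidden_act]` (the common LLaMA choice), and the toy sizes are invented:

```python
import torch
from torch import nn

hidden_size, intermediate_size = 4, 8
gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
act_fn = nn.SiLU()  # assumed hidden_act="silu"

x = torch.randn(2, hidden_size)
# Gated feed-forward: the activated gate branch modulates the up branch.
out = down_proj(act_fn(gate_proj(x)) * up_proj(x))
print(out.shape)  # torch.Size([2, 4])
```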
1,888
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaModel
from ....modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from torch import nn
from ....cache_utils import Cache
from typing import Optional, Union
from .configuration_open_llama import OpenLlamaConfig
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
import torch

@add_start_docstrings('The bare Open-Llama Model outputting raw hidden-states without any specific head on top.', OPEN_LLAMA_START_DOCSTRING)
class OpenLlamaModel(OpenLlamaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OpenLlamaDecoderLayer`]

    Args:
        config: OpenLlamaConfig
    """

    def __init__(self, config: OpenLlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        if config.use_stable_embedding:
            self.embed_layer_norm = nn.LayerNorm(config.hidden_size)
        else:
            self.embed_layer_norm = None
        self.layers = nn.ModuleList([OpenLlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        self.post_init()

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
        seq_length_with_past = seq_length
        past_key_values_length = 0
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                use_cache = False
        if past_key_values is not None:
            past_key_values_length = past_key_values.get_seq_length()
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
            if self.embed_layer_norm:
                inputs_embeds = self.embed_layer_norm(inputs_embeds)
        if self.config.use_memory_efficient_attention and self.training:
            attention_mask = None
        elif attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device)
        input_shape = (batch_size, seq_length)
        attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None
        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values[idx] if past_key_values is not None else None, output_attentions=output_attentions, use_cache=use_cache)
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        hidden_states = self.norm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None))
        return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns)
@add_start_docstrings('The bare Open-Llama Model outputting raw hidden-states without any specific head on top.', OPEN_LLAMA_START_DOCSTRING)
class OpenLlamaModel(OpenLlamaPreTrainedModel):
    '''
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OpenLlamaDecoderLayer`]

    Args:
        config: OpenLlamaConfig
    '''

    def __init__(self, config: OpenLlamaConfig):
        pass

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPast]:
        pass
5
1
36
5
30
1
9
0.08
1
11
4
0
4
7
4
134
156
26
120
38
103
10
72
26
67
30
3
2
34
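Before the decoder loop, `OpenLlamaModel.forward` expands the 2D attention mask into a 4D causal mask via `_prepare_4d_causal_attention_mask`. A hand-rolled stand-in illustrating the additive-infinity convention such helpers typically use; the exact shape and dtype handling of the real helper is not shown in this record, so treat this as a sketch:

```python
import torch

batch_size, seq_len = 1, 5

# Additive causal mask: 0 where attention is allowed (self and past),
# -inf strictly above the diagonal (future positions are blocked).
mask = torch.full((seq_len, seq_len), float("-inf"))
mask = torch.triu(mask, diagonal=1)
# Broadcast to the (batch, num_heads, query_len, key_len) layout.
mask = mask[None, None, :, :].expand(batch_size, 1, seq_len, seq_len)
print(mask[0, 0])
```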
1,889
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaPreTrainedModel
from torch import nn
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ....modeling_utils import PreTrainedModel
import torch
from .configuration_open_llama import OpenLlamaConfig

@add_start_docstrings('The bare Open-Llama Model outputting raw hidden-states without any specific head on top.', OPEN_LLAMA_START_DOCSTRING)
class OpenLlamaPreTrainedModel(PreTrainedModel):
    config: OpenLlamaConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['OpenLlamaDecoderLayer']

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            if self.config.use_stable_embedding:
                torch.nn.init.xavier_normal_(module.weight.data)
            else:
                module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
@add_start_docstrings('The bare Open-Llama Model outputting raw hidden-states without any specific head on top.', OPEN_LLAMA_START_DOCSTRING)
class OpenLlamaPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        pass
3
0
13
0
13
0
6
0
1
0
0
3
1
0
1
130
19
1
18
7
16
0
16
7
14
6
2
2
6
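`_init_weights` above is applied per-module; Hugging Face's `PreTrainedModel` walks the module tree and calls it on each submodule during `post_init`. A standalone sketch of the same per-module pattern using `nn.Module.apply` on a plain model; the `std` value and the toy model are invented for illustration:

```python
import torch
from torch import nn

std = 0.02  # plays the role of config.initializer_range

def init_weights(module):
    # Mirrors the nn.Linear / nn.Embedding branches of _init_weights above.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
model.apply(init_weights)  # apply() visits every submodule recursively
print(model[0].bias)       # zeroed by the Linear branch
```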
1,890
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaRMSNorm
import torch
from torch import nn

class OpenLlamaRMSNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        OpenLlamaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
class OpenLlamaRMSNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        '''
        OpenLlamaRMSNorm is equivalent to T5LayerNorm
        '''
        pass

    def forward(self, hidden_states):
        pass

    def extra_repr(self):
        pass
4
1
5
0
4
1
1
0.23
1
2
0
0
3
2
3
13
18
2
13
8
9
3
13
8
9
1
1
0
3
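A quick numeric check of the RMSNorm computation in `OpenLlamaRMSNorm.forward` above: unlike LayerNorm, no mean is subtracted; features are only rescaled by their reciprocal root-mean-square, so the per-position mean square ends up near 1. Toy sizes invented for illustration:

```python
import torch

eps = 1e-6
x = torch.randn(2, 3, 8)

# Same math as the forward above: scale by rsqrt of the mean square.
variance = x.pow(2).mean(-1, keepdim=True)
normed = x * torch.rsqrt(variance + eps)
print(normed.pow(2).mean(-1))  # ~1.0 at every position
```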
1,891
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
transformers.models.deprecated.open_llama.modeling_open_llama.OpenLlamaRotaryEmbedding
from torch import nn
import torch

class OpenLlamaRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor
    cos_cached: torch.Tensor
    sin_cached: torch.Tensor

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / self.dim)
        self.register_buffer('inv_freq', inv_freq, persistent=False)
        self._set_cos_sin_cache(seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype())

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
        freqs = torch.outer(t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
        return (self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype))
class OpenLlamaRotaryEmbedding(nn.Module):

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        pass

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        pass

    def forward(self, x, seq_len=None):
        pass
4
0
10
1
8
1
1
0.12
1
1
0
2
3
4
3
13
34
6
25
12
21
3
20
12
16
2
1
1
4
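`OpenLlamaRotaryEmbedding` only caches the cos/sin tables; the record does not show how they are consumed. A sketch of the standard companion step (the `rotate_half` formulation widely used with rotary embeddings, assumed here rather than taken from this record), reusing the same cache construction on toy sizes:

```python
import torch

def rotate_half(x):
    # Standard RoPE companion: swap and negate the two feature halves.
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

dim, seq_len = 8, 4
# Same table construction as _set_cos_sin_cache above.
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
t = torch.arange(seq_len).float()
freqs = torch.outer(t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(seq_len, dim)
q_rot = q * cos + rotate_half(q) * sin  # position-dependent rotation
print(q_rot.shape)  # torch.Size([4, 8])
```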
1,892
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/configuration_qdqbert.py
transformers.models.deprecated.qdqbert.configuration_qdqbert.QDQBertConfig
from ....configuration_utils import PretrainedConfig

class QDQBertConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate an
    QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the BERT
    [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the QDQBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`QDQBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`QDQBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.

    Examples:

    ```python
    >>> from transformers import QDQBertModel, QDQBertConfig

    >>> # Initializing a QDQBERT google-bert/bert-base-uncased style configuration
    >>> configuration = QDQBertConfig()

    >>> # Initializing a model from the google-bert/bert-base-uncased style configuration
    >>> model = QDQBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'qdqbert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
class QDQBertConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate an
    QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the BERT
    [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the QDQBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`QDQBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`QDQBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.

    Examples:

    ```python
    >>> from transformers import QDQBertModel, QDQBertConfig

    >>> # Initializing a QDQBERT google-bert/bert-base-uncased style configuration
    >>> configuration = QDQBertConfig()

    >>> # Initializing a model from the google-bert/bert-base-uncased style configuration
    >>> model = QDQBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        pass
2
1
35
1
34
0
1
1.39
1
1
0
0
1
13
1
33
97
11
36
35
15
50
17
16
15
1
2
0
1
1,893
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertAttention
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
from ....utils.deprecation import deprecate_kwarg

class QDQBertAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.self = QDQBertSelfAttention(config)
        self.output = QDQBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False):
        self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs
class QDQBertAttention(nn.Module):

    def __init__(self, config):
        pass

    def prune_heads(self, heads):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False):
        pass
5
0
15
1
13
1
1
0.07
1
4
2
0
3
3
3
13
47
4
41
20
28
3
22
11
18
2
1
1
4
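The head pruning in `QDQBertAttention.prune_heads` above keeps only the rows of each projection belonging to the surviving heads. A standalone sketch of that row-selection effect on a single `nn.Linear`, without the `prune_linear_layer` helper (all sizes and the pruned head index are invented for illustration):

```python
import torch
from torch import nn

num_heads, head_size = 4, 8
hidden = num_heads * head_size
query = nn.Linear(hidden, hidden)

# Prune head 1: keep the output rows of heads 0, 2 and 3 -- the same
# effect prune_linear_layer achieves inside prune_heads above.
keep = torch.cat([torch.arange(h * head_size, (h + 1) * head_size)
                  for h in (0, 2, 3)])
pruned = nn.Linear(hidden, keep.numel())
pruned.weight.data = query.weight.data[keep].clone()
pruned.bias.data = query.bias.data[keep].clone()
print(pruned.weight.shape)  # torch.Size([24, 32])
```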
1,894
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertEmbeddings
import torch
from typing import Optional, Union
from torch import nn

class QDQBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
        self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)

    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
        if token_type_ids is None:
            if hasattr(self, 'token_type_ids'):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == 'absolute':
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class QDQBertEmbeddings(nn.Module):
    '''Construct the embeddings from word, position and token_type embeddings.'''

    def __init__(self, config):
        pass

    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        pass
3
1
29
3
23
3
4
0.15
1
3
0
0
2
6
2
12
62
8
47
23
37
7
34
16
31
7
1
2
8
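`QDQBertEmbeddings` follows the BERT recipe: three embedding tables summed elementwise. A minimal sketch of that additive composition with toy table sizes (LayerNorm and dropout, which the real forward applies afterwards, are omitted for brevity):

```python
import torch
from torch import nn

vocab, hidden, max_pos, types = 100, 16, 32, 2
word = nn.Embedding(vocab, hidden)
pos = nn.Embedding(max_pos, hidden)
tok_type = nn.Embedding(types, hidden)

input_ids = torch.tensor([[5, 7, 9]])
position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0)
token_type_ids = torch.zeros_like(input_ids)

# Word + token-type + absolute-position embeddings, summed per token.
embeddings = word(input_ids) + tok_type(token_type_ids) + pos(position_ids)
print(embeddings.shape)  # torch.Size([1, 3, 16])
```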
1,895
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertEncoder
from torch import nn
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput

class QDQBertEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([QDQBertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values[i] if past_key_values is not None else None, output_attentions)
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
class QDQBertEncoder(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True):
        pass
3
0
44
3
41
0
9
0
1
6
2
0
2
3
2
12
89
7
82
26
67
0
34
14
31
16
1
3
17
1,896
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertForMaskedLM
import torch
from typing import Optional, Union
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput

@add_start_docstrings('QDQBERT Model with a `language modeling` head on top.', QDQBERT_START_DOCSTRING)
class QDQBertForMaskedLM(QDQBertPreTrainedModel):
    _tied_weights_keys = ['predictions.decoder.weight', 'predictions.decoder.bias']

    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            logger.warning('If you want to use `QDQBertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
        self.bert = QDQBertModel(config, add_pooling_layer=False)
        self.cls = QDQBertOnlyMLMHead(config)
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (masked_lm_loss,) + output if masked_lm_loss is not None else output
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        if self.config.pad_token_id is None:
            raise ValueError('The PAD token should be defined for generation')
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device)
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {'input_ids': input_ids, 'attention_mask': attention_mask}
@add_start_docstrings('QDQBERT Model with a `language modeling` head on top.', QDQBERT_START_DOCSTRING)
class QDQBertForMaskedLM(QDQBertPreTrainedModel):

    def __init__(self, config):
        pass

    def get_output_embeddings(self):
        pass

    def set_output_embeddings(self, new_embeddings):
        pass

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        '''
        pass

    def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, **model_kwargs):
        pass
9
1
18
2
14
2
2
0.11
1
6
3
0
5
2
5
135
105
17
80
35
52
9
36
18
30
5
3
1
11
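The labels contract documented in `QDQBertForMaskedLM.forward` above relies on `CrossEntropyLoss` skipping `-100` targets. A tiny sketch of that behavior on toy tensors (values invented for illustration):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(1, 4, vocab_size)
# Only position 2 carries a label; -100 entries are ignored by the loss.
labels = torch.tensor([[-100, -100, 3, -100]])

loss_fct = CrossEntropyLoss()  # ignore_index defaults to -100
loss = loss_fct(prediction_scores.view(-1, vocab_size), labels.view(-1))
print(loss.item())  # loss over the single supervised position
```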
1,897
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertForMultipleChoice
from torch import nn
import torch
from typing import Optional, Union
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput

@add_start_docstrings('\n    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', QDQBERT_START_DOCSTRING)
class QDQBertForMultipleChoice(QDQBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.bert = QDQBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', QDQBERT_START_DOCSTRING)
class QDQBertForMultipleChoice(QDQBertPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        '''
        pass
6
1
37
5
29
4
6
0.11
1
4
2
0
2
3
2
132
82
10
65
27
44
7
28
14
25
11
3
1
12
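The key tensor gymnastics in `QDQBertForMultipleChoice.forward` above is folding choices into the batch dimension for the encoder, then unfolding one score per choice. A standalone sketch of those reshapes with toy sizes (the random logits stand in for the classifier output):

```python
import torch

batch, num_choices, seq_len = 2, 4, 6
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

# Fold: (batch, num_choices, seq_len) -> (batch * num_choices, seq_len),
# so the encoder sees each choice as an independent sequence.
flat = input_ids.view(-1, input_ids.size(-1))       # (8, 6)
logits = torch.randn(flat.size(0), 1)               # one score per choice
# Unfold back to (batch, num_choices) for the cross-entropy over choices.
reshaped_logits = logits.view(-1, num_choices)      # (2, 4)
print(reshaped_logits.shape)
```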
1,898
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertForNextSentencePrediction
import torch
from typing import Optional, Union
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends
import warnings
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput

@add_start_docstrings('Bert Model with a `next sentence prediction (classification)` head on top.', QDQBERT_START_DOCSTRING)
class QDQBertForNextSentencePrediction(QDQBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.bert = QDQBertModel(config)
        self.cls = QDQBertOnlyNSPHead(config)
        self.post_init()

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, NextSentencePredictorOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, QDQBertForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        >>> model = QDQBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```"""
        if 'next_sentence_label' in kwargs:
            warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning)
            labels = kwargs.pop('next_sentence_label')
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)
        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return (next_sentence_loss,) + output if next_sentence_loss is not None else output
        return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('Bert Model with a `next sentence prediction (classification)` head on top.', QDQBERT_START_DOCSTRING)
class QDQBertForNextSentencePrediction(QDQBertPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, NextSentencePredictorOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, QDQBertForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        >>> model = QDQBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```'''
        pass
6
1
46
9
27
11
4
0.38
1
6
3
0
2
2
2
132
95
18
56
25
38
21
22
11
19
6
3
1
7
1,899
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertForQuestionAnswering
from typing import Optional, Union
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
import torch

@add_start_docstrings('\n    QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', QDQBERT_START_DOCSTRING)
class QDQBertForQuestionAnswering(QDQBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = QDQBertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
        """
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n    QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', QDQBERT_START_DOCSTRING)
class QDQBertForQuestionAnswering(QDQBertPreTrainedModel):

    def __init__(self, config):
        pass

    @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
        '''
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        '''
        pass
6
1
41
5
30
7
4
0.19
1
4
2
0
2
3
2
132
90
10
67
30
45
13
32
16
29
7
3
2
8
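The span loss in `QDQBertForQuestionAnswering.forward` above clamps out-of-range answer positions to `sequence_length` and then ignores them via `ignore_index`. A standalone sketch of exactly that clamp-and-ignore pattern on toy tensors (values invented for illustration):

```python
import torch
from torch.nn import CrossEntropyLoss

seq_len = 8
start_logits = torch.randn(2, seq_len)
end_logits = torch.randn(2, seq_len)
start_positions = torch.tensor([1, 50])   # 50 is out of range on purpose
end_positions = torch.tensor([3, 60])

# Same pattern as the forward above: clamp to seq_len, then treat
# seq_len itself as the ignore_index so clamped targets add no loss.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions)
              + loss_fct(end_logits, end_positions)) / 2
print(total_loss.item())
```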