Dataset schema (for string columns, min/max refer to string length):

| Column | dtype | min | max |
| --- | --- | --- | --- |
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
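Each record below lists these fields in the same column order. As a quick way to explore a dump like this programmatically, here is a minimal sketch using the `datasets` library; the Hub id `org/class-metrics-dataset` and the split name are placeholders, since this excerpt does not name the actual dataset.

```python
# Minimal sketch, assuming the schema above belongs to a dataset hosted on the Hugging Face Hub.
# "org/class-metrics-dataset" and "train" are placeholder values, not the real id/split.
from datasets import load_dataset

ds = load_dataset("org/class-metrics-dataset", split="train")

print(ds.features)                  # column names and dtypes, matching the table above
row = ds[4000]                      # records like the ones shown below
print(row["class_name"])
print(row["class_skeleton"][:200])

# Example filter: documented classes with modest complexity.
subset = ds.filter(lambda r: r["total_doc_str"] > 0 and r["MaxCyclomatic"] <= 5)
print(len(subset))
```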

id: 4,000
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py
class_name: transformers.models.moonshine.modular_moonshine.MoonshineRotaryEmbedding
human_written_code:
    from ..glm.modeling_glm import GlmAttention, GlmRotaryEmbedding, apply_rotary_pos_emb

    class MoonshineRotaryEmbedding(GlmRotaryEmbedding):
        pass
class_skeleton:
    class MoonshineRotaryEmbedding(GlmRotaryEmbedding):
        pass
total_program_units: 1
total_doc_str: 0
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 0
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 13
CountLine: 2
CountLineBlank: 0
CountLineCode: 2
CountLineCodeDecl: 1
CountLineCodeExe: 1
CountLineComment: 0
CountStmt: 2
CountStmtDecl: 1
CountStmtExe: 1
MaxCyclomatic: 0
MaxInheritanceTree: 2
MaxNesting: 0
SumCyclomatic: 0
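In the record above, `class_skeleton` keeps the class signature and any docstrings while replacing method bodies with `pass`. The sketch below reproduces that kind of pairing with Python's `ast` module; it is only an illustration of the relationship between the two fields, not the dataset's actual extraction code, whose handling of imports, class attributes, and nested definitions is not documented here.

```python
import ast

def make_skeleton(source: str) -> str:
    """Strip function/method bodies down to `pass`, keeping signatures and docstrings."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            new_body = []
            if ast.get_docstring(node) is not None:
                new_body.append(node.body[0])   # keep the docstring expression
            new_body.append(ast.Pass())
            node.body = new_body
    return ast.unparse(tree)                     # ast.unparse requires Python 3.9+

# Tiny made-up class (not taken from the dataset) to show the effect.
example = '''
class Toy:
    """A small example."""

    def double(self, x):
        """Return twice x."""
        return 2 * x
'''
print(make_skeleton(example))
```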

id: 4,001
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/configuration_moshi.py
class_name: transformers.models.moshi.configuration_moshi.MoshiConfig
human_written_code:
from ..auto.configuration_auto import AutoConfig from ...configuration_utils import PretrainedConfig class MoshiConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MoshiModel`]. It is used to instantiate a Moshi model according to the specified arguments, defining the audio encoder, Moshi depth decoder and Moshi decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Moshiko model, e.g. [kmhf/hf-moshiko](https://huggingface.co/kmhf/hf-moshiko) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the MoshiDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MoshiDecoder`]. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the layers and the pooler layer of the main decoder. num_hidden_layers (`int`, *optional*, defaults to 32): Number of decoder layers. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the main decoder block. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. audio_vocab_size (`int`, *optional*): Vocabulary size of the audio part of model. Defines the number of different tokens that can be represented by the `audio_codes` passed when calling the Moshi models. max_position_embeddings (`int`, *optional*, defaults to 3000): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. sliding_window (`int`, *optional*, defaults to 3000): Sliding window attention window size. If not specified, will default to `3000`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ffn_dim (`int`, *optional*, defaults to 22528): Dimensionality of the "intermediate" (often named feed-forward) layer in the main decoder block. Must be even. rms_norm_eps (`float`, *optional*, defaults to 1e-08): The epsilon used by the rms normalization layers. 
num_codebooks (`int`, *optional*, defaults to 8): The number of audio codebooks for each audio channels. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings kwargs (*optional*): Dictionary of keyword arguments. Notably: - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **depth__config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the depth decoder config. Example: ```python >>> from transformers import ( ... MoshiConfig, ... MoshiForConditionalGeneration, ... ) >>> configuration = MoshiConfig() >>> # Initializing a MoshiForConditionalGeneration (with random weights) from the kmhf/hf-moshiko style configuration >>> model = MoshiForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # Saving the model, including its configuration >>> model.save_pretrained("kmhf/hf-moshiko") >>> # loading model and config from pretrained folder >>> moshi_config = MoshiConfig.from_pretrained("kmhf/hf-moshiko") >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko", config=moshi_config) ```""" model_type = 'moshi' keys_to_ignore_at_inference = ['past_key_values'] sub_configs = {'audio_encoder_config': AutoConfig, 'depth_decoder_config': MoshiDepthConfig} def __init__(self, vocab_size=32000, hidden_size=4096, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, audio_vocab_size=None, max_position_embeddings=3000, rope_theta=10000.0, hidden_act='silu', head_dim=None, initializer_range=0.02, use_cache=True, sliding_window=3000, attention_dropout=0.0, ffn_dim=22528, rms_norm_eps=1e-08, num_codebooks=8, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads self.max_position_embeddings = max_position_embeddings self.rope_theta = rope_theta self.hidden_act = hidden_act self.head_dim = head_dim or hidden_size // num_attention_heads self.initializer_range = initializer_range self.use_cache = use_cache self.sliding_window = sliding_window self.attention_dropout = attention_dropout if ffn_dim % 2 == 1: raise ValueError(f'`ffn_dim={ffn_dim}` must be even.') self.ffn_dim = ffn_dim self.rms_norm_eps = rms_norm_eps self.num_codebooks = num_codebooks audio_encoder_config = kwargs.pop('audio_encoder_config', {}) audio_encoder_model_type = audio_encoder_config.pop('model_type', 'mimi') self.audio_encoder_config = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) if self.num_codebooks > self.audio_encoder_config.num_codebooks: raise ValueError(f'`num_codebooks={num_codebooks}` is greater than the maximum number of codebooks that the audio encoder can deal with ({self.audio_encoder_config.num_codebooks}). 
Please lower it.') self.audio_vocab_size = self.audio_encoder_config.codebook_size if audio_vocab_size is None else audio_vocab_size depth_decoder_config = kwargs.pop('depth_decoder_config', {}) depth_decoder_config.update({'audio_vocab_size': self.audio_vocab_size, 'input_size': hidden_size, 'vocab_size': vocab_size, 'num_codebooks': num_codebooks}) self.depth_decoder_config = MoshiDepthConfig(**depth_decoder_config) super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) @property def sampling_rate(self): return self.audio_encoder_config.sampling_rate @classmethod def from_audio_encoder_config(cls, audio_encoder_config: PretrainedConfig, **kwargs): """ Instantiate a [`MoshiConfig`] (or a derived class) from an audio encoder configuration. Returns: [`MoshiConfig`]: An instance of a configuration object """ return cls(audio_encoder_config=audio_encoder_config.to_dict(), **kwargs)
class MoshiConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MoshiModel`]. It is used to instantiate a Moshi model according to the specified arguments, defining the audio encoder, Moshi depth decoder and Moshi decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Moshiko model, e.g. [kmhf/hf-moshiko](https://huggingface.co/kmhf/hf-moshiko) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the MoshiDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MoshiDecoder`]. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the layers and the pooler layer of the main decoder. num_hidden_layers (`int`, *optional*, defaults to 32): Number of decoder layers. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the main decoder block. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. audio_vocab_size (`int`, *optional*): Vocabulary size of the audio part of model. Defines the number of different tokens that can be represented by the `audio_codes` passed when calling the Moshi models. max_position_embeddings (`int`, *optional*, defaults to 3000): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. sliding_window (`int`, *optional*, defaults to 3000): Sliding window attention window size. If not specified, will default to `3000`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ffn_dim (`int`, *optional*, defaults to 22528): Dimensionality of the "intermediate" (often named feed-forward) layer in the main decoder block. Must be even. rms_norm_eps (`float`, *optional*, defaults to 1e-08): The epsilon used by the rms normalization layers. num_codebooks (`int`, *optional*, defaults to 8): The number of audio codebooks for each audio channels. 
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings kwargs (*optional*): Dictionary of keyword arguments. Notably: - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **depth__config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the depth decoder config. Example: ```python >>> from transformers import ( ... MoshiConfig, ... MoshiForConditionalGeneration, ... ) >>> configuration = MoshiConfig() >>> # Initializing a MoshiForConditionalGeneration (with random weights) from the kmhf/hf-moshiko style configuration >>> model = MoshiForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # Saving the model, including its configuration >>> model.save_pretrained("kmhf/hf-moshiko") >>> # loading model and config from pretrained folder >>> moshi_config = MoshiConfig.from_pretrained("kmhf/hf-moshiko") >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko", config=moshi_config) ```''' def __init__(self, vocab_size=32000, hidden_size=4096, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, audio_vocab_size=None, max_position_embeddings=3000, rope_theta=10000.0, hidden_act='silu', head_dim=None, initializer_range=0.02, use_cache=True, sliding_window=3000, attention_dropout=0.0, ffn_dim=22528, rms_norm_eps=1e-08, num_codebooks=8, tie_word_embeddings=False, **kwargs): pass @property def sampling_rate(self): pass @classmethod def from_audio_encoder_config(cls, audio_encoder_config: PretrainedConfig, **kwargs): ''' Instantiate a [`MoshiConfig`] (or a derived class) from an audio encoder configuration. Returns: [`MoshiConfig`]: An instance of a configuration object ''' pass
total_program_units: 6
total_doc_str: 2
AvgCountLine: 29
AvgCountLineBlank: 3
AvgCountLineCode: 24
AvgCountLineComment: 2
AvgCyclomatic: 2
CommentToCodeRatio: 1.04
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 19
CountDeclMethod: 3
CountDeclMethodAll: 3
CountLine: 182
CountLineBlank: 23
CountLineCode: 78
CountLineCodeDecl: 56
CountLineCodeExe: 47
CountLineComment: 81
CountStmt: 37
CountStmtDecl: 29
CountStmtExe: 33
MaxCyclomatic: 5
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 7

id: 4,002
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/configuration_moshi.py
class_name: transformers.models.moshi.configuration_moshi.MoshiDepthConfig
human_written_code:
from ...configuration_utils import PretrainedConfig class MoshiDepthConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MoshiDepthDecoder`]. It is used to instantiate a Moshi depth decoder model according to the specified arguments, defining the Moshi depth decoder config. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the MoshiDepthDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MoshiDepthDecoder`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer of the depth decoder. input_size (`int`, *optional*, defaults to 4096): Dimensionality of the input hidden states. Used to connect the main decoder to the depth decoder. num_hidden_layers (`int`, *optional*, defaults to 6): Number of depth decoder layers. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the depth decoder block. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. audio_vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the audio part of model. Defines the number of different tokens that can be represented by the `audio_codes` passed when calling the Moshi models. max_position_embeddings (`int`, *optional*, defaults to 9): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the depth decoder. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. sliding_window (`int`, *optional*, defaults to 8): Sliding window attention window size. If not specified, will default to `8`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ffn_dim (`int`, *optional*, defaults to 5632): Dimensionality of the "intermediate" (often named feed-forward) layer in the depth decoder block. Must be even. rms_norm_eps (`float`, *optional*, defaults to 1e-08): The epsilon used by the rms normalization layers. num_codebooks (`int`, *optional*, defaults to 8): The number of audio codebooks for each audio channels. 
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings kwargs (*optional*): Dictionary of keyword arguments. Notably: - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. Example: ```python >>> from transformers import ( ... MoshiDepthConfig, ... MoshiDepthDecoder, ... ) >>> configuration = MoshiDepthConfig() >>> # Initializing a MoshiDepthDecoder (with random weights) from the kmhf/hf-moshiko style configuration >>> model = MoshiDepthDecoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'moshi_depth' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=32000, hidden_size=1024, input_size=4096, num_hidden_layers=6, num_attention_heads=16, num_key_value_heads=None, audio_vocab_size=2048, max_position_embeddings=9, hidden_act='silu', head_dim=None, initializer_range=0.02, use_cache=True, sliding_window=8, attention_dropout=0.0, ffn_dim=5632, rms_norm_eps=1e-08, num_codebooks=8, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.input_size = input_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.head_dim = head_dim or hidden_size // num_attention_heads self.initializer_range = initializer_range self.use_cache = use_cache self.sliding_window = sliding_window self.attention_dropout = attention_dropout if ffn_dim % 2 == 1: raise ValueError(f'`ffn_dim={ffn_dim}` must be even.') self.ffn_dim = ffn_dim self.rms_norm_eps = rms_norm_eps self.num_codebooks = num_codebooks self.audio_vocab_size = audio_vocab_size super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class MoshiDepthConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MoshiDepthDecoder`]. It is used to instantiate a Moshi depth decoder model according to the specified arguments, defining the Moshi depth decoder config. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the MoshiDepthDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MoshiDepthDecoder`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer of the depth decoder. input_size (`int`, *optional*, defaults to 4096): Dimensionality of the input hidden states. Used to connect the main decoder to the depth decoder. num_hidden_layers (`int`, *optional*, defaults to 6): Number of depth decoder layers. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the depth decoder block. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. audio_vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the audio part of model. Defines the number of different tokens that can be represented by the `audio_codes` passed when calling the Moshi models. max_position_embeddings (`int`, *optional*, defaults to 9): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the depth decoder. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. sliding_window (`int`, *optional*, defaults to 8): Sliding window attention window size. If not specified, will default to `8`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ffn_dim (`int`, *optional*, defaults to 5632): Dimensionality of the "intermediate" (often named feed-forward) layer in the depth decoder block. Must be even. rms_norm_eps (`float`, *optional*, defaults to 1e-08): The epsilon used by the rms normalization layers. num_codebooks (`int`, *optional*, defaults to 8): The number of audio codebooks for each audio channels. 
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings kwargs (*optional*): Dictionary of keyword arguments. Notably: - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. Example: ```python >>> from transformers import ( ... MoshiDepthConfig, ... MoshiDepthDecoder, ... ) >>> configuration = MoshiDepthConfig() >>> # Initializing a MoshiDepthDecoder (with random weights) from the kmhf/hf-moshiko style configuration >>> model = MoshiDepthDecoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=32000, hidden_size=1024, input_size=4096, num_hidden_layers=6, num_attention_heads=16, num_key_value_heads=None, audio_vocab_size=2048, max_position_embeddings=9, hidden_act='silu', head_dim=None, initializer_range=0.02, use_cache=True, sliding_window=8, attention_dropout=0.0, ffn_dim=5632, rms_norm_eps=1e-08, num_codebooks=8, tie_word_embeddings=False, **kwargs): pass
total_program_units: 2
total_doc_str: 1
AvgCountLine: 43
AvgCountLineBlank: 1
AvgCountLineCode: 42
AvgCountLineComment: 0
AvgCyclomatic: 3
CommentToCodeRatio: 1.49
CountClassBase: 1
CountClassCoupled: 2
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 1
CountDeclInstanceVariable: 17
CountDeclMethod: 1
CountDeclMethodAll: 1
CountLine: 122
CountLineBlank: 10
CountLineCode: 45
CountLineCodeDecl: 42
CountLineCodeExe: 22
CountLineComment: 67
CountStmt: 24
CountStmtDecl: 21
CountStmtExe: 22
MaxCyclomatic: 3
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 3

id: 4,003
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
class_name: transformers.models.moshi.modeling_moshi.MoshiAttention
human_written_code:
import torch.nn as nn import torch from ...cache_utils import Cache, DynamicCache, StaticCache from .configuration_moshi import MoshiConfig, MoshiDepthConfig from typing import Any, Optional, Union from ...utils.deprecation import deprecate_kwarg import math class MoshiAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: MoshiConfig, layer_idx: Optional[int]=None, use_flexible_linear=False, use_rope=True): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.is_causal = True self.scaling = 1 / math.sqrt(self.head_dim) if self.hidden_size % self.num_heads != 0: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = MoshiLinear(self.hidden_size, self.num_heads * self.head_dim, config.num_codebooks, use_flexible_linear) self.k_proj = MoshiLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, config.num_codebooks, use_flexible_linear) self.v_proj = MoshiLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, config.num_codebooks, use_flexible_linear) self.o_proj = MoshiLinear(self.num_heads * self.head_dim, self.hidden_size, config.num_codebooks, use_flexible_linear) self.rotary_emb = None if use_rope: self.rope_theta = config.rope_theta self.rotary_emb = MoshiRotaryEmbedding(config) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states, cache_position) key_states = self.k_proj(hidden_states, cache_position) value_states = self.v_proj(hidden_states, cache_position) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.rotary_emb is not None: cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} if self.rotary_emb is not None else {'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) 
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output, cache_position) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
class MoshiAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: MoshiConfig, layer_idx: Optional[int]=None, use_flexible_linear=False, use_rope=True): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
total_program_units: 4
total_doc_str: 1
AvgCountLine: 54
AvgCountLineBlank: 9
AvgCountLineCode: 44
AvgCountLineComment: 6
AvgCyclomatic: 6
CommentToCodeRatio: 0.17
CountClassBase: 1
CountClassCoupled: 9
CountClassCoupledModified: 4
CountClassDerived: 2
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 17
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 113
CountLineBlank: 19
CountLineCode: 88
CountLineCodeDecl: 38
CountLineCodeExe: 76
CountLineComment: 15
CountStmt: 57
CountStmtDecl: 29
CountStmtExe: 54
MaxCyclomatic: 7
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 11

id: 4,004
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
class_name: transformers.models.moshi.modeling_moshi.MoshiCausalLMOutputWithPast
human_written_code:
import torch.nn as nn from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Any, Optional, Union from dataclasses import dataclass @dataclass @auto_docstring(custom_intro='\n `MoshiForCausalLM` outputs.\n ') class MoshiCausalLMOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n `MoshiForCausalLM` outputs.\n ') class MoshiCausalLMOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. ''' pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 3.43
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 0
CountLine: 36
CountLineBlank: 5
CountLineCode: 7
CountLineCodeDecl: 7
CountLineCodeExe: 6
CountLineComment: 24
CountStmt: 7
CountStmtDecl: 7
CountStmtExe: 6
MaxCyclomatic: 0
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 0

id: 4,005
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
class_name: transformers.models.moshi.modeling_moshi.MoshiConditionalGenerationGenerateOutput
human_written_code:
import torch.nn as nn from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Any, Optional, Union from dataclasses import dataclass @dataclass @auto_docstring(custom_intro='\n Outputs of [`MoshiForConditionalConditionalGeneration.generate`].\n ') class MoshiConditionalGenerationGenerateOutput(ModelOutput): """ audio_sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, 1, sequence_length)`, *optional*): The generated audio waveforms. sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated text sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`): Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`): Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True`): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. past_key_values (`Cache`, *optional*, returned when `use_cache=True`): Contains the model cache, used to speed up decoding. Different models have a different cache format, check the model's documentation. Usually, a [`~cache_utils.Cache`] instance. audio_codes (`torch.LongTensor` of shape `(batch_size*num_return_sequences, num_codeooks, sequence_length)`, *optional*): The generated audio codes. Returned if `return_audio_codes=True`. Intermediate audio "tokens" which transforms to `audio_sequences` once passed through the audio decoder. 
""" audio_sequences: Optional[torch.Tensor] = None sequences: Optional[torch.LongTensor] = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[tuple[torch.FloatTensor]] = None logits: Optional[tuple[torch.FloatTensor]] = None beam_indices: Optional[torch.LongTensor] = None attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None hidden_states: Optional[tuple[tuple[torch.FloatTensor]]] = None past_key_values: Optional[Cache] = None audio_codes: Optional[torch.LongTensor] = None
@dataclass @auto_docstring(custom_intro='\n Outputs of [`MoshiForConditionalConditionalGeneration.generate`].\n ') class MoshiConditionalGenerationGenerateOutput(ModelOutput): ''' audio_sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, 1, sequence_length)`, *optional*): The generated audio waveforms. sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated text sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`): Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`): Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True`): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. past_key_values (`Cache`, *optional*, returned when `use_cache=True`): Contains the model cache, used to speed up decoding. Different models have a different cache format, check the model's documentation. Usually, a [`~cache_utils.Cache`] instance. audio_codes (`torch.LongTensor` of shape `(batch_size*num_return_sequences, num_codeooks, sequence_length)`, *optional*): The generated audio codes. Returned if `return_audio_codes=True`. Intermediate audio "tokens" which transforms to `audio_sequences` once passed through the audio decoder. ''' pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 3.09
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 0
CountLine: 47
CountLineBlank: 2
CountLineCode: 11
CountLineCodeDecl: 11
CountLineCodeExe: 10
CountLineComment: 34
CountStmt: 11
CountStmtDecl: 11
CountStmtExe: 10
MaxCyclomatic: 0
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 0

id: 4,006
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
class_name: transformers.models.moshi.modeling_moshi.MoshiConditionalGenerationOutputWithPast
human_written_code:
import torch.nn as nn from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Any, Optional, Union from dataclasses import dataclass @dataclass @auto_docstring(custom_intro='\n `MoshiForConditionalGeneration` outputs.\n ') class MoshiConditionalGenerationOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `text_labels` is provided): Text language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the text language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. depth_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `audio_labels` is provided): Audio language modeling loss (for next-token prediction). audio_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the audio language modeling heads. depth_past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Past key-values of the depth decoder. depth_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Hidden states of the depth decoder depth_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Depth decoder's Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None depth_loss: Optional[torch.FloatTensor] = None audio_logits: Optional[torch.FloatTensor] = None depth_past_key_values: Optional[Cache] = None depth_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None depth_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n `MoshiForConditionalGeneration` outputs.\n ') class MoshiConditionalGenerationOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `text_labels` is provided): Text language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the text language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. depth_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `audio_labels` is provided): Audio language modeling loss (for next-token prediction). audio_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the audio language modeling heads. depth_past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Past key-values of the depth decoder. depth_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Hidden states of the depth decoder depth_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Depth decoder's Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 2.92
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 0
CountLine: 52
CountLineBlank: 5
CountLineCode: 12
CountLineCodeDecl: 12
CountLineCodeExe: 11
CountLineComment: 35
CountStmt: 12
CountStmtDecl: 12
CountStmtExe: 11
MaxCyclomatic: 0
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 0

id: 4,007
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
class_name: transformers.models.moshi.modeling_moshi.MoshiDecoderLayer
human_written_code:
from .configuration_moshi import MoshiConfig, MoshiDepthConfig from ...modeling_layers import GradientCheckpointingLayer from ...utils.deprecation import deprecate_kwarg from typing import Any, Optional, Union import torch.nn as nn from ...cache_utils import Cache, DynamicCache, StaticCache import torch class MoshiDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MoshiConfig, layer_idx: int, use_flexible_linear: bool, use_rope=True): super().__init__() self.hidden_size = config.hidden_size self.use_flexible_linear = use_flexible_linear self.self_attn = MOSHI_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx, use_flexible_linear=use_flexible_linear, use_rope=use_rope) self.mlp = MoshiGatingMLP(config, use_flexible_linear) self.input_layernorm = MoshiRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = MoshiRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.sliding_window = config.sliding_window self._attn_implementation = config._attn_implementation @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) if not self.use_flexible_linear else self.mlp(hidden_states, cache_position) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class MoshiDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MoshiConfig, layer_idx: int, use_flexible_linear: bool, use_rope=True): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model ''' pass
total_program_units: 4
total_doc_str: 1
AvgCountLine: 40
AvgCountLineBlank: 5
AvgCountLineCode: 24
AvgCountLineComment: 11
AvgCyclomatic: 3
CommentToCodeRatio: 0.43
CountClassBase: 1
CountClassCoupled: 8
CountClassCoupledModified: 4
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 8
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 81
CountLineBlank: 11
CountLineCode: 49
CountLineCodeDecl: 24
CountLineCodeExe: 36
CountLineComment: 21
CountStmt: 26
CountStmtDecl: 14
CountStmtExe: 23
MaxCyclomatic: 4
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 5

id: 4,008
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
class_name: transformers.models.moshi.modeling_moshi.MoshiDepthDecoder
human_written_code:
import torch.nn as nn from ...generation import GenerationConfig, GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput import torch from ...cache_utils import Cache, DynamicCache, StaticCache from .configuration_moshi import MoshiConfig, MoshiDepthConfig from typing import Any, Optional, Union from ...modeling_attn_mask_utils import AttentionMaskConverter from torch.nn import CrossEntropyLoss class MoshiDepthDecoder(MoshiPreTrainedModel, GenerationMixin): """ Transformer depth decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoshiTransformerLayer`] Args: config: MoshiConfig """ config: MoshiDepthConfig def __init__(self, config: MoshiDepthConfig): super().__init__(config) self.text_embed_tokens = nn.Embedding(config.vocab_size + 1, config.hidden_size) self.embed_tokens = nn.ModuleList([nn.Embedding(config.audio_vocab_size + 1, config.hidden_size) for _ in range(config.num_codebooks - 1)]) self.input_projections = MoshiFlexibleLinear(config.input_size, config.hidden_size, config.num_codebooks) self.layers = nn.ModuleList([MoshiDecoderLayer(config, layer_idx, use_flexible_linear=True, use_rope=False) for layer_idx in range(config.num_hidden_layers)]) self.lm_heads = MoshiFlexibleLinear(config.hidden_size, config.audio_vocab_size, config.num_codebooks) self._attn_implementation = config._attn_implementation self.gradient_checkpointing = False self.config = config def forward(self, input_ids: Optional[torch.LongTensor]=None, last_hidden_state: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens. The first element of the sequence must the text token associated to the audio codebooks. The rest of the elements must be flatten audio codebooks. The `cache_position` argument can be used to indicate to which index is associated each token. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the main decoder. Used to contextualize `input_ids` attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`Cache`, *optional*): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert the inputs into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.') use_cache = False if use_cache and past_key_values is None and (not self.training): past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_seen_tokens = 0 if past_key_values is None else past_key_values.get_seq_length() if cache_position is None: cache_position = torch.arange(past_seen_tokens, past_seen_tokens + input_ids.shape[1], device=input_ids.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) if inputs_embeds is None: inputs_embeds = [] for position_idx in cache_position: position_idx = position_idx.item() if position_idx == 0: inputs_embeds.append(self.text_embed_tokens(input_ids[:, [position_idx]])) else: inputs_embeds.append(self.embed_tokens[position_idx - 1](input_ids[:, [position_idx - past_seen_tokens]])) inputs_embeds = torch.cat(inputs_embeds, dim=1) inputs_embeds += self.input_projections(last_hidden_state, cache_position) causal_mask = None if attention_mask is not None: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None hidden_states = inputs_embeds for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) logits = self.lm_heads(hidden_states, cache_position) loss = None if labels is not None: logits = logits.float() loss_fct = CrossEntropyLoss() labels = labels.masked_fill(labels == self.config.audio_vocab_size, -100).reshape(-1) labels = labels.to(logits.device) loss = loss_fct(logits.reshape(-1, self.config.audio_vocab_size), labels) if not return_dict: return tuple((v for v in [loss, logits, past_key_values, all_hidden_states, all_self_attns] if v is not None)) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=past_key_values, hidden_states=past_key_values, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and past_key_values is not None: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError("You are attempting to perform batched generation with padding_side='right' this may lead to unexpected behaviour for Flash Attention version of Moshi. Make sure to call `tokenizer.padding_side = 'left'` before tokenizing the input. 
") if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training): return None dtype = input_tensor.dtype min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: MoshiDepthConfig, past_key_values: Cache): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
config (`MoshiDepthConfig`): The model's configuration class past_key_values (`Cache`): The cache class that is being used currently to generate """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) text_config = config.get_text_config() if getattr(text_config, 'use_sliding_window', True) and text_config.sliding_window is not None: is_static_sliding_cache = isinstance(past_key_values, StaticCache) and all(past_key_values.is_sliding) if not is_static_sliding_cache or sequence_length > target_length: sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= cache_position.reshape(-1, 1) - text_config.sliding_window diagonal_attend_mask.bitwise_or_(sliding_attend_mask) causal_mask *= diagonal_attend_mask causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() if attention_mask.shape[-1] > target_length: attention_mask = attention_mask[:, :target_length] mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
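The `_prepare_4d_causal_attention_mask_with_cache_position` helper above boils down to a few tensor operations. Below is a minimal standalone sketch of that recipe; the toy sizes, cache offset and padding pattern are assumptions made purely for illustration, not values from a real Moshi run.

```python
import torch

# Toy sizes: 2 sequences, 4 query tokens attending over 6 key/value slots (cache included).
batch_size, sequence_length, target_length = 2, 4, 6
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

# Absolute positions of the current query tokens (2 tokens already sit in the cache).
cache_position = torch.arange(2, 2 + sequence_length)

# Start fully masked, then unmask every key position that is not in the future of its query.
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
diagonal_attend_mask = torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask * diagonal_attend_mask
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Fold in a 2D padding mask: a position is re-masked to min_dtype wherever the causal slot
# is open (0) but the padding mask says the key token is padding (0).
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]])
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)

print(causal_mask.shape)           # torch.Size([2, 1, 4, 6])
print((causal_mask[1, 0] == 0).int())  # 1 where attention is allowed for the padded sequence
```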
class MoshiDepthDecoder(MoshiPreTrainedModel, GenerationMixin): ''' Transformer depth decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoshiTransformerLayer`] Args: config: MoshiConfig ''' def __init__(self, config: MoshiDepthConfig): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, last_hidden_state: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens. The first element of the sequence must the text token associated to the audio codebooks. The rest of the elements must be flatten audio codebooks. The `cache_position` argument can be used to indicate to which index is associated each token. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the main decoder. Used to contextualize `input_ids` attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert the inputs into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. ''' pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: MoshiDepthConfig, past_key_values: Cache): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. config (`MoshiDepthConfig`): The model's configuration class past_key_values (`Cache`): The cache class that is being used currently to generate ''' pass
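The depth decoder's forward pass above embeds position 0 of the sequence with the text embedding table and every later position with that codebook's own embedding table. A minimal sketch of that per-position lookup, assuming an empty cache (prefill) and toy vocabulary/hidden sizes:

```python
import torch
import torch.nn as nn

# Toy sizes, chosen only for illustration.
text_vocab_size, audio_vocab_size, hidden_size, num_codebooks = 32, 64, 8, 3

text_embed_tokens = nn.Embedding(text_vocab_size, hidden_size)
embed_tokens = nn.ModuleList(nn.Embedding(audio_vocab_size + 1, hidden_size) for _ in range(num_codebooks))

# One text token followed by one token per codebook, for a batch of 2.
input_ids = torch.tensor([[5, 10, 20, 30], [7, 11, 21, 31]])
cache_position = torch.arange(input_ids.shape[1])

inputs_embeds = []
for position_idx in cache_position.tolist():
    if position_idx == 0:
        # First element of the sequence: the text token.
        inputs_embeds.append(text_embed_tokens(input_ids[:, [position_idx]]))
    else:
        # Later elements: audio tokens, each routed to its codebook's embedding table.
        inputs_embeds.append(embed_tokens[position_idx - 1](input_ids[:, [position_idx]]))

inputs_embeds = torch.cat(inputs_embeds, dim=1)
print(inputs_embeds.shape)  # torch.Size([2, 4, 8])
```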
6
3
93
11
57
26
11
0.48
2
17
10
0
3
8
4
5
387
48
229
75
192
111
114
42
109
26
2
3
44
4,009
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiFlashAttention2
import torch.nn as nn import torch from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Any, Optional, Union from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...utils.deprecation import deprecate_kwarg class MoshiFlashAttention2(MoshiAttention): """ Moshi flash attention module. This module inherits from `MoshiAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if isinstance(past_key_values, StaticCache): raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers') output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states, cache_position) key_states = self.k_proj(hidden_states, cache_position) value_states = self.v_proj(hidden_states, cache_position) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.rotary_emb is not None: cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} if self.rotary_emb is not None else {'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu' if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. 
We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output, cache_position) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
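Before calling the flash attention kernel, the forward pass above casts silently-upcast float32 activations back to a half-precision dtype (the autocast dtype, a pre-quantization dtype, or the projection weights' dtype). A small sketch of that fallback under toy shapes; `DummyConfig` is a hypothetical stand-in, not a real Moshi config:

```python
import torch
import torch.nn as nn

# A projection kept in bfloat16, and hidden states that were silently upcast to float32
# (e.g. by a LayerNorm kept in fp32).
q_proj = nn.Linear(16, 16).to(torch.bfloat16)
query_states = torch.randn(2, 4, 16, dtype=torch.float32)

class DummyConfig:  # hypothetical; a quantized model would expose `_pre_quantization_dtype`
    pass

config = DummyConfig()

if query_states.dtype == torch.float32:
    if torch.is_autocast_enabled():
        # Under torch.autocast, reuse the active autocast dtype.
        target_dtype = (
            torch.get_autocast_dtype("cuda") if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype()
        )
    elif hasattr(config, "_pre_quantization_dtype"):
        target_dtype = config._pre_quantization_dtype
    else:
        target_dtype = q_proj.weight.dtype  # fall back to the projection weights' dtype
    query_states = query_states.to(target_dtype)

print(query_states.dtype)  # torch.bfloat16 (flash attention kernels require fp16/bf16 inputs)
```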
class MoshiFlashAttention2(MoshiAttention): ''' Moshi flash attention module. This module inherits from `MoshiAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. ''' def __init__(self, *args, **kwargs): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
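For reference, a shape-only sketch of the head layout handling in the attention forward above: projections are viewed and transposed to `(batch, heads, seq, head_dim)` for the rotary embedding and cache update, then transposed back to the `(batch, seq, heads, head_dim)` layout the flash attention call consumes. The sizes are illustrative; with fewer key/value heads than query heads (grouped-query attention) the kernel shares the key/value heads across query groups.

```python
import torch

bsz, q_len, num_heads, num_key_value_heads, head_dim = 2, 5, 8, 2, 16

query_states = torch.randn(bsz, q_len, num_heads * head_dim)
key_states = torch.randn(bsz, q_len, num_key_value_heads * head_dim)

# Layout used for RoPE and the KV cache: (batch, heads, seq, head_dim).
query_states = query_states.view(bsz, q_len, num_heads, head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, num_key_value_heads, head_dim).transpose(1, 2)
print(query_states.shape, key_states.shape)  # (2, 8, 5, 16) (2, 2, 5, 16)

# Layout expected by the flash attention call: (batch, seq, heads, head_dim).
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
print(query_states.shape, key_states.shape)  # (2, 5, 8, 16) (2, 5, 2, 16)
```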
4
1
53
9
37
12
6
0.38
1
6
2
0
2
2
2
14
113
19
74
25
62
28
41
15
38
10
2
2
11
4,010
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiFlexibleLinear
import torch.nn as nn import torch class MoshiFlexibleLinear(nn.Module): def __init__(self, input_size, output_size, num_layers): super().__init__() self.weight = nn.Parameter(torch.randn(num_layers, output_size, input_size)) def forward(self, x, layer_idx=None): """ `MoshiFlexibleLinear` creates one linear layer per codebook. There are multiple ways to use it. In the default case, `sequence_length=num_layers`, so each element of the sequence is matmul'd with the weights corresponding to its index in the sequence. For more advanced cases, one can specify which codebook's layer(s) to use with `layer_idx`. If `layer_idx` indicates a single integer, every element of the sequence is matmul'd with this single codebook's layer. But if `layer_idx` is a tensor of shape `(seq_length,)`, the i-th element of the input sequence is matmul'd with the corresponding layer `weight[layer_idx[i]]`. Args: x (`torch.FloatTensor`): input to the layer of shape `(batch, num_layers, embed_dim)` or of shape `(batch, seq_length, embed_dim)` layer_idx (`torch.Tensor`, *optional*): Can be used to specify which codebook's layer(s) to use. If it's a tensor of shape `(seq_length,)`, each element of the sequence is matmul'd with the corresponding weights. """ selected_weights = torch.index_select(self.weight, 0, layer_idx) if layer_idx is not None else self.weight selected_weights = selected_weights.transpose(1, 2)[None, :, :, :] x = torch.matmul(x[:, :, None, :], selected_weights) return x.squeeze(2)
class MoshiFlexibleLinear(nn.Module): def __init__(self, input_size, output_size, num_layers): pass def forward(self, x, layer_idx=None): ''' `MoshiFlexibleLinear` creates one linear layer per codebook. There are multiple ways to use it. In the default case, `sequence_length=num_layers`, so each element of the sequence is matmul'd with the weights corresponding to its index in the sequence. For more advanced cases, one can specify which codebook's layer(s) to use with `layer_idx`. If `layer_idx` indicates a single integer, every element of the sequence is matmul'd with this single codebook's layer. But if `layer_idx` is a tensor of shape `(seq_length,)`, the i-th element of the input sequence is matmul'd with the corresponding layer `weight[layer_idx[i]]`. Args: x (`torch.FloatTensor`): input to the layer of shape `(batch, num_layers, embed_dim)` or of shape `(batch, seq_length, embed_dim)` layer_idx (`torch.Tensor`, *optional*): Can be used to specify which codebook's layer(s) to use. If it's a tensor of shape `(seq_length,)`, each element of the sequence is matmul'd with the corresponding weights. ''' pass
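A toy usage sketch of the flexible linear above, covering the three routing modes the docstring describes. The class is repeated so the snippet runs standalone, and all sizes are made-up values:

```python
import torch
import torch.nn as nn

class FlexibleLinear(nn.Module):  # same idea as MoshiFlexibleLinear above
    def __init__(self, input_size, output_size, num_layers):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(num_layers, output_size, input_size))

    def forward(self, x, layer_idx=None):
        selected_weights = torch.index_select(self.weight, 0, layer_idx) if layer_idx is not None else self.weight
        selected_weights = selected_weights.transpose(1, 2)[None, :, :, :]
        return torch.matmul(x[:, :, None, :], selected_weights).squeeze(2)

torch.manual_seed(0)
layer = FlexibleLinear(input_size=4, output_size=6, num_layers=3)

# Default: sequence length == num_layers, element i uses weight[i].
x = torch.randn(2, 3, 4)
print(layer(x).shape)                               # torch.Size([2, 3, 6])

# Explicit routing: each position of a longer sequence picks its codebook's layer.
x = torch.randn(2, 5, 4)
layer_idx = torch.tensor([0, 1, 2, 0, 1])
print(layer(x, layer_idx=layer_idx).shape)          # torch.Size([2, 5, 6])

# Single codebook layer applied to the whole sequence (one-element index tensor).
print(layer(x, layer_idx=torch.tensor([2])).shape)  # torch.Size([2, 5, 6])
```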
3
1
18
4
4
10
2
2.22
1
1
0
0
2
1
2
12
37
8
9
5
6
20
9
5
6
2
1
0
3
4,011
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiForCausalLM
import torch.nn as nn from ...generation import GenerationConfig, GenerationMixin import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Any, Optional, Union @auto_docstring(custom_intro='\n The Moshi decoder model with a text language modelling head on top. Only usable for text.\n ') class MoshiForCausalLM(MoshiPreTrainedModel, GenerationMixin): _tied_weights_keys = ['model.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = MoshiModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, MoshiCausalLMOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, MoshiForCausalLM >>> model = MoshiForCausalLM.from_pretrained("kmhf/hf-moshiko") >>> tokenizer = AutoTokenizer.from_pretrained("kmhf/hf-moshiko") >>> prompt = "What is your favorite condiment?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is your favorite condiment?" 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: logits = logits.float() shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = self.loss_function(shift_logits, shift_labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits, hidden_states) + outputs[1:] return (loss,) + output if loss is not None else output return MoshiCausalLMOutputWithPast(loss=loss, logits=logits, last_hidden_state=hidden_states, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n The Moshi decoder model with a text language modelling head on top. Only usable for text.\n ') class MoshiForCausalLM(MoshiPreTrainedModel, GenerationMixin): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, MoshiCausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, MoshiForCausalLM >>> model = MoshiForCausalLM.from_pretrained("kmhf/hf-moshiko") >>> tokenizer = AutoTokenizer.from_pretrained("kmhf/hf-moshiko") >>> prompt = "What is your favorite condiment?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is your favorite condiment?" ```''' pass
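The loss computed in the forward pass above is the usual shifted next-token cross-entropy: the logits at step t are scored against the label at step t+1. A minimal sketch with toy vocabulary and sequence sizes:

```python
import torch
import torch.nn.functional as F

vocab_size = 10
logits = torch.randn(2, 5, vocab_size)          # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 5))   # typically labels = input_ids

shift_logits = logits[..., :-1, :].contiguous() # predictions for positions 0..3
shift_labels = labels[..., 1:].contiguous()     # targets are positions 1..4

# -100 labels are ignored, matching the docstring's masking convention.
loss = F.cross_entropy(shift_logits.view(-1, vocab_size), shift_labels.view(-1), ignore_index=-100)
print(loss)
```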
5
1
16
2
11
4
2
0.37
2
8
3
0
8
3
8
9
145
21
91
37
64
34
43
21
34
9
2
1
16
4,012
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiForConditionalGeneration
from .configuration_moshi import MoshiConfig, MoshiDepthConfig from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput from ..auto.modeling_auto import AutoModel from ...generation import GenerationConfig, GenerationMixin import math from ...utils import auto_docstring, is_torch_flex_attn_available, logging from typing import Any, Optional, Union import torch.nn as nn from ...cache_utils import Cache, DynamicCache, StaticCache import torch @auto_docstring(custom_intro='\n The original Moshi model with an audio encoder, a Moshi depth decoder and a Moshi decoder, for speech-to-speech.\n ') class MoshiForConditionalGeneration(MoshiPreTrainedModel, GenerationMixin): _tied_weights_keys = ['decoder.model.embed_tokens.weight', 'decoder.lm_head.weight'] config: MoshiConfig main_input_name = 'input_ids' supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True def __init__(self, config: MoshiConfig): super().__init__(config) self.embed_tokens = nn.ModuleList([nn.Embedding(config.audio_vocab_size + 1, config.hidden_size) for _ in range(2 * config.num_codebooks)]) self.audio_encoder = AutoModel.from_config(config.audio_encoder_config) self.decoder = MoshiForCausalLM(config) self.depth_decoder = MoshiDepthDecoder._from_config(config.depth_decoder_config) self.num_codebooks = config.num_codebooks self.post_init() def get_audio_encoder(self): return self.audio_encoder def get_depth_decoder(self): return self.depth_decoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, user_input_values: Optional[torch.FloatTensor]=None, user_audio_codes: Optional[torch.Tensor]=None, moshi_input_values: Optional[torch.FloatTensor]=None, moshi_audio_codes: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, text_labels: Optional[torch.LongTensor]=None, audio_labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Seq2SeqLMOutput]: """ user_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio user prompt for the generation. user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder. moshi_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio Moshi prompt for the generation. moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see `past_key_values`). 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `input_ids` and `inputs_embeds` are both unset, `inputs_embeds` takes the value of `inputs_embeds`. text_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for text language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` audio_labels (`torch.LongTensor` of shape `(batch_size, num_codebooks, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.audio_vocab_size]` Examples: ```python >>> from transformers import MoshiForConditionalGeneration >>> import torch >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko") >>> inputs = moshi.get_unconditional_inputs() >>> logits = model(**inputs, ).logits >>> logits.shape # (bsz, seq_len, text_vocab_size) torch.Size([1, 1, 32000]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_audio_encoder = {argument[len('audio_encoder_')]: value for argument, value in kwargs.items() if argument.startswith('audio_encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')} kwargs_depth_decoder = {argument[len('depth_decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('depth_decoder_')} if inputs_embeds is None: if user_input_values is not None and user_audio_codes is None: user_audio_codes = self.audio_encoder.encode(user_input_values, num_quantizers=self.num_codebooks, **kwargs_audio_encoder)[0] if moshi_input_values is not None and moshi_audio_codes is None: moshi_audio_codes = self.audio_encoder.encode(moshi_input_values, num_quantizers=self.num_codebooks, **kwargs_audio_encoder)[0] audio_codes = torch.cat([moshi_audio_codes, user_audio_codes], dim=1) if input_ids is None and audio_codes is None: raise ValueError('You must provide at least one of `input_ids`, `inputs_embeds`, `input_values` and `audio_codes`.') if input_ids is not None: inputs_embeds = self.decoder.model.embed_tokens(input_ids) if audio_codes is not None: audio_inputs_embeds = sum([self.embed_tokens[codebook](audio_codes[:, codebook]) for codebook in range(audio_codes.shape[1])]) inputs_embeds = audio_inputs_embeds if inputs_embeds is None else audio_inputs_embeds + inputs_embeds.to(audio_inputs_embeds.device) decoder_outputs = self.decoder(attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=True, labels=text_labels, **kwargs_decoder) decoder_last_hidden_state = decoder_outputs.last_hidden_state depth_decoder_outputs = None final_loss = decoder_outputs.loss if text_labels is not None and audio_labels is not None: audio_labels = self.build_delay_pattern_mask(audio_labels, bos_token_id=self.config.audio_vocab_size, pad_token_id=self.config.audio_vocab_size, max_length=audio_labels.shape[-1] 
+ 1)[0] text_labels = text_labels.view(-1, 1) audio_labels = audio_labels.transpose(1, 2).reshape(-1, audio_labels.shape[1]) depth_input_ids = torch.cat([text_labels, audio_labels], dim=1) depth_input_ids = depth_input_ids[:, :-1] decoder_last_hidden_state = decoder_last_hidden_state.view(-1, 1, decoder_last_hidden_state.shape[-1]) depth_decoder_outputs = self.depth_decoder(last_hidden_state=decoder_last_hidden_state, input_ids=depth_input_ids, attention_mask=attention_mask, labels=audio_labels, **kwargs_depth_decoder) final_loss += depth_decoder_outputs.loss if not return_dict: outputs = decoder_outputs.to_tuple() if depth_decoder_outputs is not None: outputs += depth_decoder_outputs.to_tuple() return outputs return MoshiConditionalGenerationOutputWithPast(loss=decoder_outputs.loss, logits=decoder_outputs.logits, last_hidden_state=decoder_last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, depth_loss=None if depth_decoder_outputs is None else depth_decoder_outputs.loss, audio_logits=None if depth_decoder_outputs is None else depth_decoder_outputs.logits, depth_past_key_values=None if decoder_outputs is None else decoder_outputs.past_key_values, depth_hidden_states=None if decoder_outputs is None else decoder_outputs.hidden_states, depth_attentions=None if decoder_outputs is None else decoder_outputs.attentions) def _prepare_attention_mask_for_generation(self, input_ids: torch.LongTensor, generation_config: GenerationConfig, kwargs: dict[str, Any]) -> torch.LongTensor: pad_token_id = generation_config.pad_token_id eos_token_id = generation_config.eos_token_id default_attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) if pad_token_id is None: return default_attention_mask is_pad_token_in_inputs = pad_token_id is not None and torch.isin(input_ids, pad_token_id).any() is_pad_token_not_equal_to_eos_token_id = eos_token_id is None or ~torch.isin(eos_token_id, pad_token_id).any() can_infer_attention_mask = is_pad_token_in_inputs * is_pad_token_not_equal_to_eos_token_id attention_mask_from_padding = input_ids.ne(pad_token_id).long() attention_mask = attention_mask_from_padding * can_infer_attention_mask + default_attention_mask * ~can_infer_attention_mask return attention_mask def _prepare_inputs_embeds_for_generation(self, input_ids: Optional[torch.LongTensor]=None, user_input_values: Optional[torch.FloatTensor]=None, user_audio_codes: Optional[torch.Tensor]=None, moshi_input_values: Optional[torch.FloatTensor]=None, moshi_audio_codes: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, apply_delay_pattern_mask: bool=False, concat_unconditional_inputs: bool=False): user_delay_pattern_mask = None moshi_delay_pattern_mask = None if inputs_embeds is None and input_ids is None and (user_input_values is None) and (user_audio_codes is None) and (moshi_input_values is None) and (moshi_audio_codes is None): raise ValueError('You must provide at least one of `input_ids`, `user_input_values`, `moshi_input_values`, `user_audio_codes`, `moshi_audio_codes` or `inputs_embeds`.') if inputs_embeds is None or apply_delay_pattern_mask: if user_input_values is not None and user_audio_codes is None: user_audio_codes = self.audio_encoder.encode(user_input_values, num_quantizers=self.num_codebooks)[0] if moshi_input_values is not None and moshi_audio_codes is 
None: moshi_audio_codes = self.audio_encoder.encode(moshi_input_values, num_quantizers=self.num_codebooks)[0] if inputs_embeds is None and concat_unconditional_inputs: unconditional_inputs = self.get_unconditional_inputs(num_samples=user_audio_codes.shape[0]) moshi_audio_codes = torch.cat([unconditional_inputs.moshi_audio_codes, moshi_audio_codes], dim=2) user_audio_codes = torch.cat([unconditional_inputs.user_audio_codes, user_audio_codes], dim=2) input_ids = torch.cat([unconditional_inputs.input_ids, input_ids], dim=1) if attention_mask is not None: attention_mask = torch.cat([unconditional_inputs.attention_mask, attention_mask], dim=1) if inputs_embeds is None or apply_delay_pattern_mask: if apply_delay_pattern_mask and user_audio_codes is not None: user_audio_codes, user_delay_pattern_mask = self.build_delay_pattern_mask(user_audio_codes, bos_token_id=self.config.audio_vocab_size, pad_token_id=self.config.audio_vocab_size, max_length=generation_config.max_length) if apply_delay_pattern_mask and moshi_audio_codes is not None: moshi_audio_codes, moshi_delay_pattern_mask = self.build_delay_pattern_mask(moshi_audio_codes, bos_token_id=self.config.audio_vocab_size, pad_token_id=self.config.audio_vocab_size, max_length=generation_config.max_length) if inputs_embeds is None: audio_inputs_embeds = None if user_audio_codes is not None and moshi_audio_codes is not None: audio_codes = torch.cat([moshi_audio_codes, user_audio_codes], dim=1) audio_inputs_embeds = sum([self.embed_tokens[codebook](audio_codes[:, codebook]) for codebook in range(audio_codes.shape[1])]) elif moshi_audio_codes is not None: audio_codes = moshi_audio_codes audio_inputs_embeds = sum([self.embed_tokens[codebook](audio_codes[:, codebook]) for codebook in range(audio_codes.shape[1])]) elif user_audio_codes is not None: audio_codes = user_audio_codes audio_inputs_embeds = sum([self.embed_tokens[codebook](audio_codes[:, codebook + self.num_codebooks]) for codebook in range(audio_codes.shape[1])]) if input_ids is not None: inputs_embeds = self.decoder.model.embed_tokens(input_ids) if audio_inputs_embeds is not None: inputs_embeds = audio_inputs_embeds if inputs_embeds is None else audio_inputs_embeds + inputs_embeds.to(audio_inputs_embeds.device) return (inputs_embeds, input_ids, user_audio_codes, moshi_audio_codes, user_delay_pattern_mask, moshi_delay_pattern_mask, attention_mask) @torch.no_grad() def generate(self, input_ids: Optional[torch.LongTensor]=None, user_input_values: Optional[torch.FloatTensor]=None, user_audio_codes: Optional[torch.Tensor]=None, moshi_input_values: Optional[torch.FloatTensor]=None, moshi_audio_codes: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, return_audio_waveforms: Optional[bool]=True, return_audio_codes: Optional[bool]=None, concat_unconditional_inputs: Optional[bool]=True, **kwargs) -> torch.LongTensor: """ Generates sequences of text token ids and audio tokens ids. Parameters: input_ids (`torch.Tensor `of shape `(batch_size, sequence_length), *optional*): The sequence used as a text prompt for the generation. user_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio user prompt for the generation. user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder. 
moshi_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio Moshi prompt for the generation. moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` and the audio inputs you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert the inputs into associated vectors than the model's internal embedding lookup matrix. return_audio_waveforms (`bool`, *optional*, defaults to `True`): If `False`, won't generate the audio waveforms. return_audio_codes (`bool`, *optional*): If `True`, will also returns the generated audio codes, i.e the intermediate audio "tokens" which transforms to `audio_sequences` once passed through the audio decoder. concat_unconditional_inputs (`bool`, *optional*, defaults to `True`): If `False`, won't concatenate initial audio and text tokens. kwargs (`dict[str, Any]`, *optional*): Remaining dictionary of keyword arguments that are passed to the `generate` method. Refers to the original [`generate` docstrings](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationMixin.generate) for more information on how to use them. Note that keywords with a *depth_* prefix will be input for the `generate` method of the depth decoder. Otherwise, the latter will use its default generation config. 
Return: [`MoshiConditionalGenerationGenerateOutput`] """ if hasattr(self, 'hf_device_map') and (not hasattr(self.depth_decoder, 'hf_device_map')): self.depth_decoder.hf_device_map = {} if '' in self.hf_device_map: self.depth_decoder.hf_device_map = self.hf_device_map else: main_device = [d for d in self.hf_device_map.values() if d not in ['cpu', 'disk']][0] self.depth_decoder.hf_device_map = {key[len('depth_decoder'):]: main_device if value in ['cpu', 'disk'] else value for key, value in self.hf_device_map.items() if key.startswith('depth_decoder')} self.hf_device_map = {key: value for key, value in self.hf_device_map.items() if not key.startswith('depth_decoder')} depth_decoder_kwargs_keys = {argument for argument in kwargs if argument.startswith('depth_decoder_')} kwargs_depth_decoder = {argument[len('depth_decoder_'):]: kwargs.pop(argument) for argument in depth_decoder_kwargs_keys} generation_config, kwargs = self._prepare_generation_config(kwargs.pop('generation_config', None), **kwargs) input_ids, user_audio_codes, moshi_audio_codes, concat_unconditional_inputs = self._check_and_maybe_initialize_inputs(input_ids=input_ids, user_input_values=user_input_values, user_audio_codes=user_audio_codes, moshi_input_values=moshi_input_values, moshi_audio_codes=moshi_audio_codes, inputs_embeds=inputs_embeds, concat_unconditional_inputs=concat_unconditional_inputs) inputs = inputs_embeds if input_ids is None else input_ids input_ids_length = inputs.shape[-1] + 1 if concat_unconditional_inputs else inputs.shape[-1] has_default_max_length = kwargs.get('max_length') is None and generation_config.max_length is not None has_default_min_length = kwargs.get('min_length') is None and generation_config.min_length is not None generation_config = self._prepare_generated_length(generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name='inputs_embeds' if input_ids is None else 'input_ids', inputs_tensor=inputs, input_ids_length=input_ids_length) if hasattr(generation_config, 'depth_decoder_config'): depth_decoder_generation_config = generation_config.depth_decoder_config else: depth_decoder_generation_config = {'min_length': self.num_codebooks + 1, 'max_length': self.num_codebooks + 1, 'cache_implementation': 'static'} depth_decoder_generation_config.update(kwargs_depth_decoder) kwargs_depth_decoder = depth_decoder_generation_config attention_mask = kwargs.pop('attention_mask', None) if attention_mask is None: attention_mask = self._prepare_attention_mask_for_generation(input_ids=input_ids, generation_config=generation_config, kwargs=kwargs) inputs_embeds, input_ids, user_audio_codes, moshi_audio_codes, user_delay_pattern_mask, moshi_delay_pattern_mask, attention_mask = self._prepare_inputs_embeds_for_generation(input_ids=input_ids, user_input_values=user_input_values, user_audio_codes=user_audio_codes, moshi_input_values=moshi_input_values, moshi_audio_codes=moshi_audio_codes, inputs_embeds=inputs_embeds, attention_mask=attention_mask, generation_config=generation_config, apply_delay_pattern_mask=True, concat_unconditional_inputs=concat_unconditional_inputs) blank_input_values = torch.zeros((inputs_embeds.shape[0], 1, int(self.config.sampling_rate / self.config.audio_encoder_config.frame_rate)), dtype=self.dtype, device=self.device) blank_user_audio_codes = self.audio_encoder.encode(blank_input_values, num_quantizers=self.num_codebooks)[0] kwargs['user_delay_pattern_mask'] = user_delay_pattern_mask if 
user_delay_pattern_mask is not None else kwargs.get('user_delay_pattern_mask') kwargs['moshi_delay_pattern_mask'] = moshi_delay_pattern_mask if moshi_delay_pattern_mask is not None else kwargs.get('moshi_delay_pattern_mask') self.generated_audio_codes = torch.repeat_interleave(moshi_audio_codes, max(generation_config.num_beams, generation_config.num_return_sequences), dim=0) return_dict_in_generate = generation_config.num_beams > 1 or generation_config.return_dict_in_generate output_scores = generation_config.num_beams > 1 or generation_config.output_scores outputs = super().generate(inputs_embeds=inputs_embeds, input_ids=input_ids, generation_config=generation_config, blank_user_audio_codes=blank_user_audio_codes, kwargs_depth_decoder=kwargs_depth_decoder, return_dict_in_generate=return_dict_in_generate, output_scores=output_scores, attention_mask=attention_mask, **kwargs) if not return_audio_waveforms and (not return_audio_codes): if return_dict_in_generate and (not generation_config.return_dict_in_generate): return outputs.sequences return outputs if not return_dict_in_generate: output_text_ids = outputs else: output_text_ids = outputs.sequences if generation_config.num_return_sequences > 1: moshi_delay_pattern_mask = torch.repeat_interleave(moshi_delay_pattern_mask, generation_config.num_return_sequences, dim=0) if generation_config.num_beams > 1: beam_indices = outputs.beam_indices[:, :-moshi_audio_codes.shape[-1]] generated_audio_codes = self.generated_audio_codes[:, :, moshi_audio_codes.shape[-1]:] expanded_beam_indices = beam_indices[:, :-1].unsqueeze(1).expand(-1, self.num_codebooks, -1) generated_audio_codes = torch.gather(generated_audio_codes, dim=0, index=expanded_beam_indices) moshi_audio_codes = torch.repeat_interleave(moshi_audio_codes, generation_config.num_return_sequences, dim=0) self.generated_audio_codes = torch.cat((moshi_audio_codes, generated_audio_codes), dim=2) self.last_hidden_state = torch.index_select(self.last_hidden_state, dim=0, index=beam_indices[:, -1]) last_hidden_state = self.last_hidden_state.view(-1, 1, self.last_hidden_state.shape[-1]) last_generated_audio_codes = self.depth_decoder.generate(last_hidden_state=last_hidden_state, input_ids=output_text_ids[:, -1:].view(-1, 1), **kwargs_depth_decoder) last_generated_audio_codes = last_generated_audio_codes[:, 1:].unsqueeze(2) self.generated_audio_codes = torch.cat([self.generated_audio_codes, last_generated_audio_codes], dim=2) output_audio_codes = self.apply_delay_pattern_mask(self.generated_audio_codes, moshi_delay_pattern_mask) mask = moshi_delay_pattern_mask != self.config.audio_vocab_size output_audio_codes = output_audio_codes[mask].reshape(mask.shape[0], self.num_codebooks, -1) output_values = None if return_audio_waveforms: output_values = self.audio_encoder.decode(output_audio_codes).audio_values output_audio_codes = output_audio_codes if return_audio_codes else None if generation_config.return_dict_in_generate: return MoshiConditionalGenerationGenerateOutput(audio_sequences=output_values, audio_codes=output_audio_codes, **outputs) return MoshiConditionalGenerationGenerateOutput(audio_sequences=output_values, sequences=output_text_ids, audio_codes=output_audio_codes) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, logits_to_keep=None, user_delay_pattern_mask=None, moshi_delay_pattern_mask=None, kwargs_depth_decoder=None, blank_user_audio_codes: 
Optional[torch.FloatTensor]=None, **kwargs): if past_key_values is not None: if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids, 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: batch_size, sequence_length, _ = inputs_embeds.shape device = inputs_embeds.device else: batch_size, sequence_length = input_ids.shape device = input_ids.device attention_mask = self.decoder.model._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_cache_shape(), dtype=self.decoder.lm_head.weight.dtype, device=device, cache_position=cache_position, batch_size=batch_size, config=self.config, past_key_values=past_key_values) model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask, 'cache_position': cache_position}) if model_inputs['input_ids'] is not None: last_hidden_state = kwargs.pop('last_hidden_state') last_hidden_state = last_hidden_state.view(-1, 1, last_hidden_state.shape[-1]) input_ids = model_inputs.pop('input_ids') generated_audio_codes = self.depth_decoder.generate(last_hidden_state=last_hidden_state, input_ids=input_ids.view(-1, 1), **kwargs_depth_decoder) generated_audio_codes = generated_audio_codes[:, 1:].unsqueeze(2) user_audio_codes = self.apply_delay_pattern_mask(torch.cat([self.generated_audio_codes, blank_user_audio_codes.to(self.generated_audio_codes.device)], dim=2), user_delay_pattern_mask)[:, :, -1:] self.generated_audio_codes = self.apply_delay_pattern_mask(torch.cat([self.generated_audio_codes, generated_audio_codes], dim=2), moshi_delay_pattern_mask) inputs_embeds, _, _, _, _, _, _ = self._prepare_inputs_embeds_for_generation(input_ids, moshi_audio_codes=self.generated_audio_codes[:, :, -1:], user_audio_codes=user_audio_codes) model_inputs['input_ids'] = None model_inputs['inputs_embeds'] = inputs_embeds for key, value in kwargs.items(): if key not in model_inputs: model_inputs[key] = value return model_inputs def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool=False, num_new_tokens: int=1) -> dict[str, Any]: model_kwargs = super()._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder, num_new_tokens) model_kwargs['last_hidden_state'] = outputs.get('last_hidden_state')[:, -1:] self.last_hidden_state = outputs.get('last_hidden_state')[:, -1:] return model_kwargs def get_input_embeddings(self): return self.decoder.get_input_embeddings() def set_input_embeddings(self, value): self.decoder.set_input_embeddings(value) def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.decoder.set_output_embeddings(new_embeddings) def freeze_audio_encoder(self): """ Freeze the audio encoder weights. """ for param in self.audio_encoder.parameters(): param.requires_grad = False self.audio_encoder._requires_grad = False def freeze_depth_decoder(self): """ Freeze the depth encoder weights. 
""" for param in self.depth_decoder.parameters(): param.requires_grad = False self.depth_decoder._requires_grad = False @staticmethod def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): """Apply a delay pattern mask to the decoder input ids, only preserving predictions where the mask is set to -1, and otherwise setting to the value detailed in the mask.""" seq_len = input_ids.shape[-1] decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len] input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask) return input_ids def build_delay_pattern_mask(self, input_ids: torch.LongTensor, bos_token_id: int, pad_token_id: int, max_length: Optional[int]=None): """Build a delayed pattern mask to the input_ids. Each codebook, except the first one, is offset by one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there are 4 codebooks and a max sequence length of 6, we have the delayed pattern mask of shape `(codebooks, seq_len)`: - [-1, -1, -1, -1, -1, P] - [ B, -1, -1, -1, -1, -1] - [ B, -1, -1, -1, -1, -1] - [ B, -1, -1, -1, -1, -1] where B is the beginning-of-sentence token, P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [ a0, a1, -1, -1, -1, P] - [ B, b0, b1, -1, -1, -1] - [ B, c0, c1, -1, -1, -1] - [ B, d0, d1, -1, -1, -1] where a-d indicate the codebook channel and 0/1 indicates the temporality. Now, we only override the -1 tokens in our prediction. """ bsz, num_codebooks, seq_len = input_ids.shape max_length = max_length if max_length is not None else self.generation_config.max_length input_ids_shifted = torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 seq_len_to_keep = min(seq_len, max_length - 1) input_ids_shifted[:, 0, :seq_len_to_keep] = input_ids[:, 0, :seq_len_to_keep] input_ids_shifted[:, 1:, 1:seq_len_to_keep + 1] = input_ids[:, 1:, :seq_len_to_keep] input_ids_shifted[:, 1:, 0] = bos_token_id input_ids_shifted[:, 0, -1] = pad_token_id pattern_mask = input_ids_shifted input_ids = input_ids_shifted[..., :seq_len_to_keep] return (input_ids, pattern_mask) def get_unconditional_inputs(self, num_samples=1): """ Helper function to get null inputs for unconditional generation, enabling the model to be used without the feature extractor or tokenizer. Args: num_samples (int, *optional*): Number of audio samples to unconditionally generate. max_new_tokens (int, *optional*): Number of tokens to generate for each sample. More tokens means longer audio samples, at the expense of longer inference (since more audio tokens need to be generated per sample). 
Example: ```python >>> from transformers import MoshiForConditionalGeneration >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko-pytorch-bf16") >>> # get the unconditional (or 'null') inputs for the model >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```""" input_ids = torch.ones((num_samples, 1), device=self.device, dtype=torch.int64) * self.config.vocab_size user_audio_codes = torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64) * self.config.audio_vocab_size moshi_audio_codes = torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64) * self.config.audio_vocab_size attention_mask = torch.ones((num_samples, 1), device=self.device, dtype=torch.long) return MoshiUnconditionalInput(input_ids=input_ids, user_audio_codes=user_audio_codes, moshi_audio_codes=moshi_audio_codes, attention_mask=attention_mask) def _check_and_maybe_initialize_inputs(self, input_ids=None, user_input_values=None, user_audio_codes=None, moshi_input_values=None, moshi_audio_codes=None, inputs_embeds=None, concat_unconditional_inputs=None): inputs = input_ids if inputs_embeds is None else inputs_embeds user_input = user_audio_codes if user_input_values is None else user_input_values moshi_input = moshi_audio_codes if moshi_input_values is None else moshi_input_values one_input_has_been_passed = user_input is not None or moshi_input is not None or inputs is not None concat_unconditional_inputs = concat_unconditional_inputs and (not (inputs_embeds is not None and input_ids is None)) if one_input_has_been_passed and user_input is None: raise ValueError('No user audio inputs have been passed alongside the other inputs. Make sure either `user_input_values` or `user_audio_codes` is passed or use `MoshiForConditionalGeneration.get_unconditional_inputs`. Check the `MoshiForConditionalGeneration` docstrings for more information.') elif one_input_has_been_passed and moshi_input is None: raise ValueError('No Moshi audio inputs have been passed alongside the other inputs. Make sure either `moshi_input_values` or `moshi_audio_codes` is passed or use `MoshiForConditionalGeneration.get_unconditional_inputs`. Check the `MoshiForConditionalGeneration` docstrings for more information.') elif one_input_has_been_passed and inputs is None: raise ValueError('No `input_ids` or `inputs_embeds` have been passed alongside the other inputs. Make sure `input_ids` is passed or use `MoshiForConditionalGeneration.get_unconditional_inputs`. 
Check the `MoshiForConditionalGeneration` docstrings for more information.') elif not one_input_has_been_passed: unconditional_inputs = self.get_unconditional_inputs() input_ids = unconditional_inputs.input_ids user_audio_codes = unconditional_inputs.user_audio_codes moshi_audio_codes = unconditional_inputs.moshi_audio_codes concat_unconditional_inputs = False else: user_seq_length = user_input.shape[-1] moshi_seq_length = moshi_input.shape[-1] tokens_seq_length = inputs.shape[1] ratio = self.config.audio_encoder_config.frame_rate / self.config.sampling_rate moshi_seq_length = math.ceil(moshi_seq_length * ratio) if moshi_audio_codes is None else moshi_seq_length user_seq_length = math.ceil(user_seq_length * ratio) if user_audio_codes is None else user_seq_length if tokens_seq_length != moshi_seq_length or tokens_seq_length != user_seq_length: raise ValueError("At least one of the 3 inputs of `MoshiForConditionalGeneration` doesn't have the same sequence length as the others.Make sure that they all have the same sequence length. Check the `MoshiForConditionalGeneration` docstrings for more information.") return (input_ids, user_audio_codes, moshi_audio_codes, concat_unconditional_inputs)
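The delay pattern built by `build_delay_pattern_mask` and merged back by `apply_delay_pattern_mask` above can be made concrete with a small worked example. The token ids, BOS/PAD values and sizes below are toy values chosen only to show the shifted layout:

```python
import torch

bsz, num_codebooks, seq_len, max_length = 1, 4, 3, 6
B, P = 99, 98  # toy bos/pad ids

input_ids = torch.arange(num_codebooks * seq_len).reshape(1, num_codebooks, seq_len)

# -1 marks positions left free for prediction.
input_ids_shifted = torch.full((bsz, num_codebooks, max_length), -1, dtype=torch.long)
seq_len_to_keep = min(seq_len, max_length - 1)
input_ids_shifted[:, 0, :seq_len_to_keep] = input_ids[:, 0, :seq_len_to_keep]              # codebook 0: no delay
input_ids_shifted[:, 1:, 1 : seq_len_to_keep + 1] = input_ids[:, 1:, :seq_len_to_keep]     # other codebooks: delayed by 1
input_ids_shifted[:, 1:, 0] = B                                                             # BOS opens the delayed codebooks
input_ids_shifted[:, 0, -1] = P                                                             # PAD closes codebook 0
pattern_mask = input_ids_shifted
print(pattern_mask[0])
# tensor([[ 0,  1,  2, -1, -1, 98],
#         [99,  3,  4,  5, -1, -1],
#         [99,  6,  7,  8, -1, -1],
#         [99,  9, 10, 11, -1, -1]])

# Later, generated ids are merged back: keep predictions only where the mask is -1.
generated = torch.full((bsz, num_codebooks, max_length), 42)
merged = torch.where(pattern_mask == -1, generated, pattern_mask)
print(merged[0])
```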
@auto_docstring(custom_intro='\n The original Moshi model with an audio encoder, a Moshi depth decoder and a Moshi decoder, for speech-to-speech.\n ') class MoshiForConditionalGeneration(MoshiPreTrainedModel, GenerationMixin): def __init__(self, config: MoshiConfig): pass def get_audio_encoder(self): pass def get_depth_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, user_input_values: Optional[torch.FloatTensor]=None, user_audio_codes: Optional[torch.Tensor]=None, moshi_input_values: Optional[torch.FloatTensor]=None, moshi_audio_codes: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, text_labels: Optional[torch.LongTensor]=None, audio_labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Seq2SeqLMOutput]: ''' user_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio user prompt for the generation. user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder. moshi_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio Moshi prompt for the generation. moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `input_ids` and `inputs_embeds` are both unset, `inputs_embeds` takes the value of `inputs_embeds`. text_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for text language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` audio_labels (`torch.LongTensor` of shape `(batch_size, num_codebooks, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.audio_vocab_size]` Examples: ```python >>> from transformers import MoshiForConditionalGeneration >>> import torch >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko") >>> inputs = moshi.get_unconditional_inputs() >>> logits = model(**inputs, ).logits >>> logits.shape # (bsz, seq_len, text_vocab_size) torch.Size([1, 1, 32000]) ```''' pass def _prepare_attention_mask_for_generation(self, input_ids: torch.LongTensor, generation_config: GenerationConfig, kwargs: dict[str, Any]) -> torch.LongTensor: pass def _prepare_inputs_embeds_for_generation(self, input_ids: Optional[torch.LongTensor]=None, user_input_values: Optional[torch.FloatTensor]=None, user_audio_codes: Optional[torch.Tensor]=None, moshi_input_values: Optional[torch.FloatTensor]=None, moshi_audio_codes: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, apply_delay_pattern_mask: bool=False, concat_unconditional_inputs: bool=False): pass @torch.no_grad() def generate(self, input_ids: Optional[torch.LongTensor]=None, user_input_values: Optional[torch.FloatTensor]=None, user_audio_codes: Optional[torch.Tensor]=None, moshi_input_values: Optional[torch.FloatTensor]=None, moshi_audio_codes: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, return_audio_waveforms: Optional[bool]=True, return_audio_codes: Optional[bool]=None, concat_unconditional_inputs: Optional[bool]=True, **kwargs) -> torch.LongTensor: ''' Generates sequences of text token ids and audio tokens ids. Parameters: input_ids (`torch.Tensor `of shape `(batch_size, sequence_length), *optional*): The sequence used as a text prompt for the generation. user_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio user prompt for the generation. user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder. moshi_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*): The audio waveforms used as audio Moshi prompt for the generation. moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` and the audio inputs you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert the inputs into associated vectors than the model's internal embedding lookup matrix. return_audio_waveforms (`bool`, *optional*, defaults to `True`): If `False`, won't generate the audio waveforms. 
return_audio_codes (`bool`, *optional*): If `True`, will also returns the generated audio codes, i.e the intermediate audio "tokens" which transforms to `audio_sequences` once passed through the audio decoder. concat_unconditional_inputs (`bool`, *optional*, defaults to `True`): If `False`, won't concatenate initial audio and text tokens. kwargs (`dict[str, Any]`, *optional*): Remaining dictionary of keyword arguments that are passed to the `generate` method. Refers to the original [`generate` docstrings](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationMixin.generate) for more information on how to use them. Note that keywords with a *depth_* prefix will be input for the `generate` method of the depth decoder. Otherwise, the latter will use its default generation config. Return: [`MoshiConditionalGenerationGenerateOutput`] ''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, logits_to_keep=None, user_delay_pattern_mask=None, moshi_delay_pattern_mask=None, kwargs_depth_decoder=None, blank_user_audio_codes: Optional[torch.FloatTensor]=None, **kwargs): pass def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool=False, num_new_tokens: int=1) -> dict[str, Any]: pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass def freeze_audio_encoder(self): ''' Freeze the audio encoder weights. ''' pass def freeze_depth_decoder(self): ''' Freeze the depth encoder weights. ''' pass @staticmethod def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): '''Apply a delay pattern mask to the decoder input ids, only preserving predictions where the mask is set to -1, and otherwise setting to the value detailed in the mask.''' pass def build_delay_pattern_mask(self, input_ids: torch.LongTensor, bos_token_id: int, pad_token_id: int, max_length: Optional[int]=None): '''Build a delayed pattern mask to the input_ids. Each codebook, except the first one, is offset by one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there are 4 codebooks and a max sequence length of 6, we have the delayed pattern mask of shape `(codebooks, seq_len)`: - [-1, -1, -1, -1, -1, P] - [ B, -1, -1, -1, -1, -1] - [ B, -1, -1, -1, -1, -1] - [ B, -1, -1, -1, -1, -1] where B is the beginning-of-sentence token, P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [ a0, a1, -1, -1, -1, P] - [ B, b0, b1, -1, -1, -1] - [ B, c0, c1, -1, -1, -1] - [ B, d0, d1, -1, -1, -1] where a-d indicate the codebook channel and 0/1 indicates the temporality. Now, we only override the -1 tokens in our prediction. ''' pass def get_unconditional_inputs(self, num_samples=1): ''' Helper function to get null inputs for unconditional generation, enabling the model to be used without the feature extractor or tokenizer. Args: num_samples (int, *optional*): Number of audio samples to unconditionally generate. max_new_tokens (int, *optional*): Number of tokens to generate for each sample. 
More tokens means longer audio samples, at the expense of longer inference (since more audio tokens need to be generated per sample). Example: ```python >>> from transformers import MoshiForConditionalGeneration >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko-pytorch-bf16") >>> # get the unconditional (or 'null') inputs for the model >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```''' pass def _check_and_maybe_initialize_inputs(self, input_ids=None, user_input_values=None, user_audio_codes=None, moshi_input_values=None, moshi_audio_codes=None, inputs_embeds=None, concat_unconditional_inputs=None): pass
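The `build_delay_pattern_mask` docstring in the skeleton above describes the delayed codebook layout (each codebook except the first is offset by one step, padded with BOS at the start and PAD at the end). The following is a minimal sketch that reproduces the documented pattern for a toy case (4 codebooks, max length 6, no prompt); the ids `B` and `P` are placeholders rather than Moshi's real special token ids, and this illustrates the pattern, not the model's own implementation:

```python
import torch

num_codebooks, max_length = 4, 6
B, P = 2048, 2049   # hypothetical BOS / pad token ids for the audio codebooks
VALID = -1          # -1 marks positions left free for prediction

mask = torch.full((num_codebooks, max_length), VALID, dtype=torch.long)
mask[0, -1] = P     # the first codebook ends one step early, padded with P
mask[1:, 0] = B     # every other codebook starts one step late, padded with B

print(mask)
# tensor([[  -1,   -1,   -1,   -1,   -1, 2049],
#         [2048,   -1,   -1,   -1,   -1,   -1],
#         [2048,   -1,   -1,   -1,   -1,   -1],
#         [2048,   -1,   -1,   -1,   -1,   -1]])
```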
24
7
41
5
29
7
4
0.25
2
19
10
0
18
11
20
21
860
123
591
185
490
149
255
102
234
18
2
2
89
4,013
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiGatingMLP
import torch
import torch.nn as nn
from typing import Any, Optional, Union

from ...activations import ACT2FN


class MoshiGatingMLP(nn.Module):
    def __init__(self, config, use_flexible_linear=False):
        super().__init__()
        self.activation_fn = ACT2FN[config.hidden_act]
        ffn_dim = config.ffn_dim
        hidden_size = config.hidden_size
        num_layers = config.num_codebooks if use_flexible_linear else 1
        if num_layers == 1:
            self.fc1 = nn.Linear(hidden_size, ffn_dim, bias=False)
            self.fc2 = nn.Linear(ffn_dim // 2, hidden_size, bias=False)
        else:
            self.fc1 = MoshiFlexibleLinear(hidden_size, ffn_dim, num_layers)
            self.fc2 = MoshiFlexibleLinear(ffn_dim // 2, hidden_size, num_layers)

    def forward(self, hidden_states: torch.Tensor, layer_idx: Optional[int] = None) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states) if layer_idx is None else self.fc1(hidden_states, layer_idx)

        batch_size, sequence_length, _ = hidden_states.shape
        hidden_states = hidden_states.view(batch_size, sequence_length, 2, -1)
        hidden_states = self.activation_fn(hidden_states[..., 0, :]) * hidden_states[..., 1, :]
        hidden_states = self.fc2(hidden_states) if layer_idx is None else self.fc2(hidden_states, layer_idx)
        return hidden_states
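The forward pass above implements a GLU-style gate: `fc1` projects to `ffn_dim`, the result is split into two halves along the feature dimension, and the activated first half gates the second half before `fc2` projects back to `hidden_size`. A minimal standalone sketch of that gating step, using plain `nn.Linear`, SiLU, and toy sizes rather than the real Moshi configuration:

```python
import torch
import torch.nn as nn

hidden_size, ffn_dim = 8, 16          # toy dimensions, assumed for illustration only

fc1 = nn.Linear(hidden_size, ffn_dim, bias=False)
fc2 = nn.Linear(ffn_dim // 2, hidden_size, bias=False)
act = nn.SiLU()

x = torch.randn(2, 5, hidden_size)    # (batch, seq_len, hidden_size)
h = fc1(x)                            # (batch, seq_len, ffn_dim)
h = h.view(2, 5, 2, -1)               # split the ffn dimension into two halves
h = act(h[..., 0, :]) * h[..., 1, :]  # gate: activation(half_0) * half_1
out = fc2(h)                          # (batch, seq_len, hidden_size)
print(out.shape)                      # torch.Size([2, 5, 8])
```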
class MoshiGatingMLP(nn.Module):
    def __init__(self, config, use_flexible_linear=False):
        pass

    def forward(self, hidden_states: torch.Tensor, layer_idx: Optional[int] = None) -> torch.Tensor:
        pass
3
0
11
1
10
0
3
0
1
4
1
0
2
3
2
12
23
3
20
10
17
0
19
10
16
3
1
1
6
4,014
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiLinear
import torch.nn as nn


class MoshiLinear(nn.Module):
    def __init__(self, input_dim, output_dim, num_codebooks, use_flexible_linear=False):
        super().__init__()
        self.use_flexible_linear = use_flexible_linear

        if not use_flexible_linear:
            self.linear = nn.Linear(input_dim, output_dim, bias=False)
        else:
            self.linear = MoshiFlexibleLinear(input_dim, output_dim, num_layers=num_codebooks)

    def forward(self, x, layer_idx=None):
        if self.use_flexible_linear:
            return self.linear(x, layer_idx)
        else:
            return self.linear(x)
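Both branches above delegate either to a plain `nn.Linear` or to `MoshiFlexibleLinear`, which is not shown in this snippet; in Moshi it keeps one projection per codebook and selects it with `layer_idx`. The following is a hedged stand-in for that idea: `PerCodebookLinear` is a made-up name, and the shapes and einsum convention are assumptions for illustration, not the library implementation.

```python
import torch
import torch.nn as nn

class PerCodebookLinear(nn.Module):
    """Illustrative stand-in: one weight matrix per codebook, selected by `layer_idx`."""

    def __init__(self, input_dim, output_dim, num_layers):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(num_layers, output_dim, input_dim))

    def forward(self, x, layer_idx=None):
        if layer_idx is not None:
            # every position uses the projection of a single codebook
            return torch.einsum("bsh,oh->bso", x, self.weight[layer_idx])
        # otherwise, sequence position i uses the i-th codebook's projection
        return torch.einsum("bsh,soh->bso", x, self.weight)

x = torch.randn(2, 4, 8)                   # (batch, num_codebooks, hidden)
proj = PerCodebookLinear(8, 6, num_layers=4)
print(proj(x).shape)                       # torch.Size([2, 4, 6])
print(proj(x, layer_idx=0).shape)          # torch.Size([2, 4, 6])
```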
class MoshiLinear(nn.Module):
    def __init__(self, input_dim, output_dim, num_codebooks, use_flexible_linear=False):
        pass

    def forward(self, x, layer_idx=None):
        pass
3
0
7
1
6
0
2
0
1
2
1
0
2
2
2
12
16
3
13
5
10
0
11
5
8
2
1
1
4
4,015
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiModel
import torch.nn as nn from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache from .configuration_moshi import MoshiConfig, MoshiDepthConfig from typing import Any, Optional, Union from ...modeling_attn_mask_utils import AttentionMaskConverter @auto_docstring class MoshiModel(MoshiPreTrainedModel): def __init__(self, config: MoshiConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size + 1, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([MoshiDecoderLayer(config, layer_idx, use_flexible_linear=False) for layer_idx in range(config.num_hidden_layers)]) self.norm = MoshiRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = None if attention_mask is not None: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and past_key_values is not None: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError("You are attempting to perform batched generation with padding_side='right' this may lead to unexpected behaviour for Flash Attention version of Moshi. Make sure to call `tokenizer.padding_side = 'left'` before tokenizing the input. 
") if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training): return None dtype = input_tensor.dtype min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: MoshiConfig, past_key_values: Cache): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
config (`MoshiConfig`): The model's configuration class past_key_values (`Cache`): The cache class that is being used currently to generate """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) text_config = config.get_text_config() if getattr(text_config, 'use_sliding_window', True) and text_config.sliding_window is not None: is_static_sliding_cache = isinstance(past_key_values, StaticCache) and all(past_key_values.is_sliding) if not is_static_sliding_cache or sequence_length > target_length: sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= cache_position.reshape(-1, 1) - text_config.sliding_window diagonal_attend_mask.bitwise_or_(sliding_attend_mask) causal_mask *= diagonal_attend_mask causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() if attention_mask.shape[-1] > target_length: attention_mask = attention_mask[:, :target_length] mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
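The static method above combines three ingredients: a causal (lower-triangular) constraint, an optional sliding-window constraint, and the 2D padding mask broadcast into 4D. A minimal sketch of the same idea for a toy case, without caches or sliding windows; this is an illustration, not the library helper itself:

```python
import torch

batch_size, seq_len = 2, 4
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

# 2D padding mask: 1 = real token, 0 = padding (second sample has one pad token)
attention_mask = torch.tensor([[1, 1, 1, 1], [0, 1, 1, 1]])

# causal part: position i may only attend to positions <= i
causal = torch.full((seq_len, seq_len), min_dtype, dtype=dtype)
causal = causal.masked_fill(torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool)), 0.0)

# broadcast to (batch, 1, query_len, key_len) and add the padding constraint
mask_4d = causal[None, None, :, :].expand(batch_size, 1, seq_len, seq_len).clone()
padding = attention_mask[:, None, None, :] == 0
mask_4d = mask_4d.masked_fill(padding, min_dtype)

print(mask_4d.shape)  # torch.Size([2, 1, 4, 4])
```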
@auto_docstring class MoshiModel(MoshiPreTrainedModel): def __init__(self, config: MoshiConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]: pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: MoshiConfig, past_key_values: Cache): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. config (`MoshiConfig`): The model's configuration class past_key_values (`Cache`): The cache class that is being used currently to generate ''' pass
8
1
48
5
37
8
8
0.24
1
16
9
0
5
7
6
7
306
34
225
70
187
53
110
38
103
25
2
3
45
4,016
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiPreTrainedModel
import torch.nn as nn

from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from .configuration_moshi import MoshiConfig, MoshiDepthConfig


@auto_docstring
class MoshiPreTrainedModel(PreTrainedModel):
    config: MoshiConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['MoshiDecoderLayer', 'MimiTransformerLayer']
    _supports_flash_attn = True
    _supports_sdpa = True
    main_input_name = 'input_ids'

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, MoshiFlexibleLinear):
            module.weight.data.normal_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, MoshiRMSNorm):
            module.weight.data.fill_(1.0)
@auto_docstring
class MoshiPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        pass
3
0
18
0
18
0
8
0.15
1
0
0
4
1
0
1
1
33
2
27
12
25
4
24
12
22
8
1
2
8
4,017
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiRMSNorm
import torch
import torch.nn as nn


class MoshiRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-06):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        output = output * self.weight.float()
        return output.type_as(x)

    def extra_repr(self):
        return f'{tuple(self.weight.shape)}, eps={self.eps}'
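`_norm` divides each hidden vector by its root-mean-square, so after normalization (and before the learned `weight` scale, which is initialized to ones) every vector has an RMS of roughly 1. A quick usage sketch checking that property with the class as defined above:

```python
import torch

norm = MoshiRMSNorm(dim=8)        # weight starts at ones, so only the RMS scaling applies
x = torch.randn(2, 3, 8) * 5.0    # deliberately badly scaled activations

y = norm(x)
rms = y.pow(2).mean(-1).sqrt()
print(rms)                        # all entries close to 1.0
```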
class MoshiRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-06):
        pass

    def _norm(self, x):
        pass

    def forward(self, x):
        pass

    def extra_repr(self):
        pass
5
0
3
0
3
0
1
0.15
1
4
0
0
4
2
4
14
17
3
13
8
8
2
13
8
8
1
1
0
4
4,018
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiRotaryEmbedding
import torch
import torch.nn as nn

from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from .configuration_moshi import MoshiConfig, MoshiDepthConfig


class MoshiRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: MoshiConfig, device=None):
        super().__init__()
        # "rope_type" was originally stored under the key "type"
        if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
        else:
            self.rope_type = 'default'
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer('inv_freq', inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
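The module returns per-position `cos`/`sin` tables built from `inv_freq`; the attention layers then rotate the query/key channels with them via `apply_rotary_pos_emb`. A minimal standalone sketch of that rotation, using the standard `rotate_half` formulation and a fixed base of 10000; this is shown for illustration and is not quoted from the file above:

```python
import torch

def rotate_half(x):
    # split the channels in two halves and swap them with a sign flip
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

head_dim, seq_len = 8, 4
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim/2,)
positions = torch.arange(seq_len).float()

freqs = torch.outer(positions, inv_freq)          # (seq_len, head_dim/2)
emb = torch.cat((freqs, freqs), dim=-1)           # (seq_len, head_dim)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(1, 1, seq_len, head_dim)          # (batch, heads, seq, head_dim)
q_rotated = q * cos + rotate_half(q) * sin        # rotary embedding applied per position
print(q_rotated.shape)                            # torch.Size([1, 1, 4, 8])
```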
class MoshiRotaryEmbedding(nn.Module):
    def __init__(self, config: MoshiConfig, device=None):
        pass

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        pass
5
0
18
2
13
5
3
0.35
1
4
1
0
3
7
3
13
59
8
40
21
35
14
38
20
34
3
1
1
8
4,019
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiSdpaAttention
import torch.nn as nn import torch from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Any, Optional, Union from ...utils.deprecation import deprecate_kwarg class MoshiSdpaAttention(MoshiAttention): """ Moshi attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `MoshiAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('MoshiModel is using MoshiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states, cache_position) key_states = self.k_proj(hidden_states, cache_position) value_states = self.v_proj(hidden_states, cache_position) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.rotary_emb is not None: cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} if self.rotary_emb is not None else {'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = causal_mask is None and q_len > 1 attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output, cache_position) return (attn_output, None)
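The forward pass above expands the key/value heads with `repeat_kv` (grouped-query attention) and then delegates the attention computation to `torch.nn.functional.scaled_dot_product_attention`, with `is_causal` standing in for an explicit mask when no padding mask is given and `q_len > 1`. A small standalone sketch of those two steps, with toy shapes rather than the module's real configuration:

```python
import torch
import torch.nn.functional as F

bsz, q_len, head_dim = 1, 5, 16
num_heads, num_kv_heads = 8, 2
n_rep = num_heads // num_kv_heads   # each KV head serves 4 query heads

q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn(bsz, num_kv_heads, q_len, head_dim)
v = torch.randn(bsz, num_kv_heads, q_len, head_dim)

# repeat_kv: duplicate each key/value head n_rep times so shapes match the query heads
k = k[:, :, None, :, :].expand(bsz, num_kv_heads, n_rep, q_len, head_dim).reshape(bsz, num_heads, q_len, head_dim)
v = v[:, :, None, :, :].expand(bsz, num_kv_heads, n_rep, q_len, head_dim).reshape(bsz, num_heads, q_len, head_dim)

out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)
print(out.shape)                    # torch.Size([1, 8, 5, 16])
```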
class MoshiSdpaAttention(MoshiAttention): ''' Moshi attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `MoshiAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. ''' @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
3
1
83
13
64
14
9
0.31
1
4
1
0
1
0
1
13
91
14
65
21
53
20
33
11
31
9
2
1
9
4,020
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/moshi/modeling_moshi.py
transformers.models.moshi.modeling_moshi.MoshiUnconditionalInput
import torch
import torch.nn as nn
from dataclasses import dataclass
from typing import Any, Optional, Union

from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput, Seq2SeqLMOutput
from ...utils import auto_docstring, is_torch_flex_attn_available, logging


@dataclass
@auto_docstring
class MoshiUnconditionalInput(ModelOutput):
    """
    input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        The sequence used as a text prompt for the generation.
    user_audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, sequence_length)`, *optional*):
        The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and
        represents the audio "tokens" of `user_input_values` once passed through the audio encoder.
    moshi_audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, sequence_length)`, *optional*):
        The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and
        represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder.
    attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Attention mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
        1 for tokens that are **not masked**, 0 for tokens that are **masked**.
    """

    input_ids: Optional[torch.LongTensor] = None
    user_audio_codes: Optional[torch.Tensor] = None
    moshi_audio_codes: Optional[torch.Tensor] = None
    attention_mask: Optional[torch.LongTensor] = None
@dataclass @auto_docstring class MoshiUnconditionalInput(ModelOutput): ''' input_ids (`torch.Tensor `of shape `(batch_size, sequence_length), *optional*): The sequence used as a text prompt for the generation. user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder. moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*): The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder. attention_mask (`torch.LongTensor`) of shape `(batch_size, sequence_length)`, *optional*): Attention mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**. ''' pass
3
1
0
0
0
0
0
2.4
1
0
0
0
0
0
0
0
18
1
5
5
4
12
5
5
4
0
1
0
0
4,021
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/configuration_mpnet.py
transformers.models.mpnet.configuration_mpnet.MPNetConfig
from ...configuration_utils import PretrainedConfig class MPNetConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. It is used to instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPNet [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30527): Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MPNetModel`] or [`TFMPNetModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. 
Examples: ```python >>> from transformers import MPNetModel, MPNetConfig >>> # Initializing a MPNet mpnet-base style configuration >>> configuration = MPNetConfig() >>> # Initializing a model from the mpnet-base style configuration >>> model = MPNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'mpnet' def __init__(self, vocab_size=30527, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, relative_attention_num_buckets=32, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.relative_attention_num_buckets = relative_attention_num_buckets
class MPNetConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. It is used to instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPNet [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30527): Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MPNetModel`] or [`TFMPNetModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. Examples: ```python >>> from transformers import MPNetModel, MPNetConfig >>> # Initializing a MPNet mpnet-base style configuration >>> configuration = MPNetConfig() >>> # Initializing a model from the mpnet-base style configuration >>> model = MPNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30527, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, relative_attention_num_buckets=32, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): pass
2
1
33
1
32
0
1
1.32
1
1
0
0
1
12
1
1
89
10
34
33
14
45
16
15
14
1
1
0
1
4,022
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetAttention
from torch import nn

from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer


class MPNetAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attn = MPNetSelfAttention(config)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads
        )

        self.attn.q = prune_linear_layer(self.attn.q, index)
        self.attn.k = prune_linear_layer(self.attn.k, index)
        self.attn.v = prune_linear_layer(self.attn.v, index)
        self.attn.o = prune_linear_layer(self.attn.o, index, dim=1)

        self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads)
        self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs):
        self_outputs = self.attn(hidden_states, attention_mask, head_mask, position_bias, output_attentions=output_attentions)
        attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs
class MPNetAttention(nn.Module):
    def __init__(self, config):
        pass

    def prune_heads(self, heads):
        pass

    def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs):
        pass
4
0
14
1
13
0
1
0.03
1
3
1
0
3
4
3
13
44
5
39
20
27
1
23
12
19
2
1
1
4
4,023
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetClassificationHead
import torch
from torch import nn


class MPNetClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take the first (<s>) token, equivalent to [CLS]
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
class MPNetClassificationHead(nn.Module):
    '''Head for sentence-level classification tasks.'''

    def __init__(self, config):
        pass

    def forward(self, features, **kwargs):
        pass
3
1
7
0
7
1
1
0.14
1
1
0
0
2
3
2
12
17
2
14
7
11
2
14
7
11
1
1
0
2
4,024
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetEmbeddings
import torch
from torch import nn


class MPNetEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.padding_idx = 1
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)

    def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs):
        if position_ids is None:
            if input_ids is not None:
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)

        embeddings = inputs_embeds + position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position
        ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
        return position_ids.unsqueeze(0).expand(input_shape)
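`create_position_ids_from_input_ids` is referenced above but not included in this snippet; in the MPNet/RoBERTa family it assigns increasing position ids only to non-padding tokens, offset by `padding_idx`, while padding positions keep `padding_idx`. A sketch of that contract (an illustration of the documented behaviour, not a quote of the helper itself):

```python
import torch

def create_position_ids_from_input_ids(input_ids, padding_idx):
    # non-pad tokens get positions padding_idx + 1, padding_idx + 2, ...; pads stay at padding_idx
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[0, 5, 6, 7, 1, 1]])   # 1 is the padding index here
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]])
```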
class MPNetEmbeddings(nn.Module): def __init__(self, config): pass def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs): pass def create_position_ids_from_inputs_embeds(self, inputs_embeds): ''' We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor ''' pass
4
1
18
3
13
2
3
0.15
1
1
0
0
3
5
3
13
57
11
40
16
36
6
32
16
28
6
1
2
8
4,025
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetEncoder
from torch import nn from typing import Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import math import torch class MPNetEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.n_heads = config.num_attention_heads self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)]) self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False, **kwargs): position_bias = self.compute_position_bias(hidden_states) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], position_bias, output_attentions=output_attentions, **kwargs) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) def compute_position_bias(self, x, position_ids=None, num_buckets=32): bsz, qlen, klen = (x.size(0), x.size(1), x.size(1)) if position_ids is not None: context_position = position_ids[:, :, None] memory_position = position_ids[:, None, :] else: context_position = torch.arange(qlen, dtype=torch.long)[:, None] memory_position = torch.arange(klen, dtype=torch.long)[None, :] relative_position = memory_position - context_position rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets) rp_bucket = rp_bucket.to(x.device) values = self.relative_attention_bias(rp_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) values = values.expand((bsz, -1, qlen, klen)).contiguous() return values @staticmethod def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): ret = 0 n = -relative_position num_buckets //= 2 ret += (n < 0).to(torch.long) * num_buckets n = torch.abs(n) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + (torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret
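`relative_position_bucket` above maps signed relative distances to a fixed number of buckets: half the buckets encode the sign, small distances get exact buckets, and larger distances are binned logarithmically up to `max_distance` and capped at the last bucket. Since it is a `@staticmethod`, it can be probed directly without building the encoder; a quick worked example (bucket ids shown for the defaults `num_buckets=32`, `max_distance=128`):

```python
import torch
from transformers.models.mpnet.modeling_mpnet import MPNetEncoder

# signed distances j - i between key position j and query position i
rel = torch.tensor([[-4, -1, 0, 1, 4, 20, 100]])
buckets = MPNetEncoder.relative_position_bucket(rel, num_buckets=32, max_distance=128)
print(buckets)  # tensor([[ 4,  1,  0, 17, 20, 26, 31]])
```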
class MPNetEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False, **kwargs): pass def compute_position_bias(self, x, position_ids=None, num_buckets=32): pass @staticmethod def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): pass
6
0
21
3
18
0
3
0.01
1
8
2
0
3
4
4
14
87
13
73
35
58
1
49
25
44
8
1
2
12
4,026
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetForMaskedLM
from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from typing import Optional, Union class MPNetForMaskedLM(MPNetPreTrainedModel): _tied_weights_keys = ['lm_head.decoder'] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class MPNetForMaskedLM(MPNetPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` ''' pass
6
1
16
2
12
2
2
0.13
1
6
3
0
4
2
4
5
74
11
56
26
34
7
25
14
20
5
2
1
8
4,027
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetForMultipleChoice
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from torch import nn from typing import Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...utils import auto_docstring, logging @auto_docstring class MPNetForMultipleChoice(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.mpnet(flat_input_ids, position_ids=flat_position_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class MPNetForMultipleChoice(MPNetPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
5
1
36
5
28
4
6
0.11
1
5
2
0
2
3
2
3
79
10
62
30
42
7
27
18
24
10
2
1
11
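The (batch_size, num_choices, sequence_length) input layout described in the docstring above can be sketched as follows; this assumes the microsoft/mpnet-base checkpoint, whose multiple-choice head is randomly initialized, so the scores are only illustrative:

import torch
from transformers import AutoTokenizer, MPNetForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base')
model = MPNetForMultipleChoice.from_pretrained('microsoft/mpnet-base')

prompt = 'The glass fell off the table,'
choices = ['so it broke.', 'so it flew away.']

# Encode the prompt once per choice, then add the num_choices dimension:
# the forward pass above flattens (batch_size, num_choices, seq_len) to
# (batch_size * num_choices, seq_len) before running the encoder.
encoding = tokenizer([prompt, prompt], choices, return_tensors='pt', padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}
labels = torch.tensor([0])  # index of the correct choice

outputs = model(**inputs, labels=labels)
print(outputs.logits.shape)  # (1, 2): one score per choice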
4,028
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetForQuestionAnswering
from typing import Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...utils import auto_docstring, logging from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch @auto_docstring class MPNetForQuestionAnswering(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class MPNetForQuestionAnswering(MPNetPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: pass
5
0
41
5
29
7
4
0.2
1
5
2
0
2
3
2
3
89
11
65
29
44
13
32
16
29
7
2
2
8
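A sketch of how the start/end logits produced by the head above are typically turned into an answer span; it assumes the microsoft/mpnet-base checkpoint, whose QA head is not fine-tuned, so the extracted span is arbitrary:

import torch
from transformers import AutoTokenizer, MPNetForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base')
model = MPNetForQuestionAnswering.from_pretrained('microsoft/mpnet-base')

question = 'Where is the Eiffel Tower?'
context = 'The Eiffel Tower is located in Paris, France.'
inputs = tokenizer(question, context, return_tensors='pt')

with torch.no_grad():
    outputs = model(**inputs)

# Greedy span decoding: most likely start and end token positions.
start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs['input_ids'][0, start:end + 1]))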
4,029
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetForSequenceClassification
from typing import Optional, Union import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...utils import auto_docstring, logging @auto_docstring(custom_intro='\n MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ') class MPNetForSequenceClassification(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ') class MPNetForSequenceClassification(MPNetPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
38
4
31
4
7
0.1
1
7
3
0
2
3
2
3
83
8
68
24
48
7
32
12
29
12
2
3
13
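The problem_type branching in the forward pass above (MSE for regression, cross-entropy for single-label, BCE-with-logits for multi-label) can be sketched with a randomly initialized model; the config values below are illustrative only:

import torch
from transformers import MPNetConfig, MPNetForSequenceClassification

config = MPNetConfig(num_labels=3, problem_type='single_label_classification')
model = MPNetForSequenceClassification(config)  # randomly initialized

input_ids = torch.randint(0, config.vocab_size, (2, 10))
attention_mask = torch.ones_like(input_ids)
labels = torch.tensor([0, 2])  # integer class ids -> CrossEntropyLoss branch

outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
print(outputs.loss, outputs.logits.shape)  # logits: (2, 3)

# With num_labels=1 and float labels the same forward takes the MSELoss branch;
# with multi-hot float labels and problem_type='multi_label_classification'
# it takes the BCEWithLogitsLoss branch.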
4,030
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetForTokenClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union import torch from ...utils import auto_docstring, logging @auto_docstring class MPNetForTokenClassification(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class MPNetForTokenClassification(MPNetPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
5
1
30
5
23
3
3
0.09
1
5
2
0
2
4
2
3
68
10
53
25
33
5
22
13
19
5
2
1
6
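For the token-classification head above, labels are per-token class indices in [0, num_labels - 1]; a minimal sketch with a randomly initialized model:

import torch
from transformers import MPNetConfig, MPNetForTokenClassification

config = MPNetConfig(num_labels=5)
model = MPNetForTokenClassification(config)  # randomly initialized

input_ids = torch.randint(0, config.vocab_size, (1, 12))
attention_mask = torch.ones_like(input_ids)
labels = torch.randint(0, config.num_labels, (1, 12))  # one label per token

outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
print(outputs.logits.shape)  # (1, 12, 5): one score per token and per class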
4,031
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetIntermediate
from ...activations import ACT2FN, gelu
import torch
from torch import nn

class MPNetIntermediate(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class MPNetIntermediate(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
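The intermediate block above is the feed-forward expansion of the transformer layer (hidden_size -> intermediate_size followed by the configured activation). A small shape check, using the internal module path given in this row (not a public top-level export):

import torch
from transformers import MPNetConfig
from transformers.models.mpnet.modeling_mpnet import MPNetIntermediate

config = MPNetConfig()  # defaults: hidden_size=768, intermediate_size=3072, hidden_act='gelu'
layer = MPNetIntermediate(config)

hidden_states = torch.randn(2, 8, config.hidden_size)
print(layer(hidden_states).shape)  # torch.Size([2, 8, 3072])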
4,032
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetLMHead
from ...activations import ACT2FN, gelu
from torch import nn
import torch

class MPNetLMHead(nn.Module):
    """MPNet Head for masked and permuted language modeling."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def _tie_weights(self):
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = gelu(x)
        x = self.layer_norm(x)
        x = self.decoder(x)
        return x
class MPNetLMHead(nn.Module):
    '''MPNet Head for masked and permuted language modeling.'''

    def __init__(self, config):
        pass

    def _tie_weights(self):
        pass

    def forward(self, features, **kwargs):
        pass
4
1
7
1
5
1
1
0.19
1
1
0
0
3
4
3
13
26
7
16
9
12
3
16
9
12
1
1
0
3
4,033
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetLayer
from torch import nn

class MPNetLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.attention = MPNetAttention(config)
        self.intermediate = MPNetIntermediate(config)
        self.output = MPNetOutput(config)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs):
        self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, position_bias=position_bias, output_attentions=output_attentions)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + outputs
        return outputs
class MPNetLayer(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs):
        pass
3
0
14
1
14
1
1
0.04
1
4
3
0
2
3
2
12
30
2
28
19
17
1
14
11
11
1
1
0
2
4,034
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetModel
from typing import Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from ...utils import auto_docstring, logging @auto_docstring class MPNetModel(MPNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True): """ add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = MPNetEmbeddings(config) self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
@auto_docstring class MPNetModel(MPNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True): ''' add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: pass
8
2
15
2
13
1
3
0.07
1
8
4
0
5
4
5
6
88
12
71
30
48
5
37
18
31
11
2
1
17
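A minimal sketch of running the bare encoder above and reading back its two main outputs, assuming the public microsoft/mpnet-base checkpoint:

import torch
from transformers import AutoTokenizer, MPNetModel

tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base')
model = MPNetModel.from_pretrained('microsoft/mpnet-base')

inputs = tokenizer('MPNet combines masked and permuted language modeling.', return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size): tanh-pooled first token, see MPNetPooler below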
4,035
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetOutput
from torch import nn
import torch

class MPNetOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class MPNetOutput(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
4,036
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetPooler
from torch import nn
import torch

class MPNetPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class MPNetPooler(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
4,037
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetPreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_mpnet import MPNetConfig
from ...modeling_utils import PreTrainedModel
from torch import nn

@auto_docstring
class MPNetPreTrainedModel(PreTrainedModel):
    config: MPNetConfig
    base_model_prefix = 'mpnet'

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, MPNetLMHead):
            module.bias.data.zero_()
@auto_docstring
class MPNetPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
3
1
15
0
12
3
6
0.2
1
0
0
6
1
0
1
1
19
1
15
4
13
3
13
4
11
6
1
2
6
4,038
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/modeling_mpnet.py
transformers.models.mpnet.modeling_mpnet.MPNetSelfAttention
from torch import nn
import torch
import math

class MPNetSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.q = nn.Linear(config.hidden_size, self.all_head_size)
        self.k = nn.Linear(config.hidden_size, self.all_head_size)
        self.v = nn.Linear(config.hidden_size, self.all_head_size)
        self.o = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs):
        batch_size, seq_length, _ = hidden_states.shape
        q = self.q(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        k = self.k(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        v = self.v(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        attention_scores = torch.matmul(q, k.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if position_bias is not None:
            attention_scores += position_bias
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        c = torch.matmul(attention_probs, v)
        c = c.permute(0, 2, 1, 3).contiguous()
        new_c_shape = c.size()[:-2] + (self.all_head_size,)
        c = c.view(*new_c_shape)
        o = self.o(c)
        outputs = (o, attention_probs) if output_attentions else (o,)
        return outputs
class MPNetSelfAttention(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs):
        pass
3
0
23
5
17
1
3
0.06
1
3
0
0
3
8
3
13
71
16
52
30
40
3
41
22
37
5
1
1
8
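A shape-level sketch of the self-attention module above; it uses the internal module path from this row and a default config (12 heads, head size 768 / 12 = 64). The position_bias argument is the relative-position term added to the raw attention scores and may be left as None:

import torch
from transformers import MPNetConfig
from transformers.models.mpnet.modeling_mpnet import MPNetSelfAttention

config = MPNetConfig()
attn = MPNetSelfAttention(config)

hidden_states = torch.randn(2, 5, config.hidden_size)
# output_attentions=True makes forward return (context, attention_probs)
context, probs = attn(hidden_states, position_bias=None, output_attentions=True)
print(context.shape)  # (2, 5, 768)
print(probs.shape)    # (2, 12, 5, 5): one attention map per head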
4,039
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/tokenization_mpnet.py
transformers.models.mpnet.tokenization_mpnet.BasicTokenizer
import unicodedata from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. 
""" never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output)
class BasicTokenizer: ''' Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. ''' def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): pass def tokenize(self, text, never_split=None): ''' Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. ''' pass def _run_strip_accents(self, text): '''Strips accents from a piece of text.''' pass def _run_split_on_punc(self, text, never_split=None): '''Splits punctuation on a piece of text.''' pass def _tokenize_chinese_chars(self, text): '''Adds whitespace around any CJK character.''' pass def _is_chinese_char(self, cp): '''Checks whether CP is the codepoint of a CJK character.''' pass def _clean_text(self, text): '''Performs invalid character removal and whitespace cleanup on text.''' pass
8
7
19
1
14
5
4
0.55
0
2
0
0
7
5
7
7
159
14
98
39
83
54
78
32
70
8
0
4
27
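The pre-tokenization behaviour implemented above (text cleanup, optional lower-casing and accent stripping, punctuation splitting, whitespace padding around CJK characters) can be sketched directly on the class from this file:

from transformers.models.mpnet.tokenization_mpnet import BasicTokenizer

basic = BasicTokenizer(do_lower_case=True)

print(basic.tokenize('HeLLo, World!'))
# ['hello', ',', 'world', '!'] -- lower-cased, punctuation split into separate tokens

print(basic.tokenize('ab中cd'))
# ['ab', '中', 'cd'] -- the CJK character is padded with spaces first, so it becomes its own token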
4,040
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/tokenization_mpnet.py
transformers.models.mpnet.tokenization_mpnet.MPNetTokenizer
from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace import os import collections from typing import Optional class MPNetTokenizer(PreTrainedTokenizer): """ This tokenizer inherits from [`BertTokenizer`] which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (`str`): Path to the vocabulary file. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='[UNK]', pad_token='<pad>', mask_token='<mask>', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs): bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): vocab = self.added_tokens_encoder.copy() vocab.update(self.vocab) return vocab def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A MPNet sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` methods. Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Set to True if the token list is already formatted with special tokens for the model Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,)
class MPNetTokenizer(PreTrainedTokenizer): ''' This tokenizer inherits from [`BertTokenizer`] which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (`str`): Path to the vocabulary file. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. 
''' def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='[UNK]', pad_token='<pad>', mask_token='<mask>', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs): pass @property def do_lower_case(self): pass @property def vocab_size(self): pass def get_vocab(self): pass def _tokenize(self, text): pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MPNet sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` methods. Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Set to True if the token list is already formatted with special tokens for the model Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
15
7
15
1
11
4
3
0.71
1
8
2
0
12
5
12
101
263
36
133
57
95
95
75
31
62
10
3
3
33
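The special-token layout documented above (`<s> X </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair) and the all-zero token type ids can be sketched as follows, assuming the public microsoft/mpnet-base checkpoint:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base')

ids_a = tokenizer.encode('first sequence', add_special_tokens=False)
ids_b = tokenizer.encode('second sequence', add_special_tokens=False)

pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
print(tokenizer.convert_ids_to_tokens(pair))
# ['<s>', ..., '</s>', '</s>', ..., '</s>'] -- cls + A + sep + sep + B + sep

print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
# a list of zeros: MPNet does not use token type ids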
4,041
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpnet/tokenization_mpnet_fast.py
transformers.models.mpnet.tokenization_mpnet_fast.MPNetTokenizerFast
from ...tokenization_utils_fast import PreTrainedTokenizerFast import json from typing import Optional from .tokenization_mpnet import MPNetTokenizer from tokenizers import normalizers from ...tokenization_utils import AddedToken class MPNetTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" MPNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = MPNetTokenizer model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='[UNK]', pad_token='<pad>', mask_token='<mask>', tokenize_chinese_chars=True, strip_accents=None, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if pre_tok_state.get('lowercase', do_lower_case) != do_lower_case or pre_tok_state.get('strip_accents', strip_accents) != strip_accents: pre_tok_class = getattr(normalizers, pre_tok_state.pop('type')) pre_tok_state['lowercase'] = do_lower_case pre_tok_state['strip_accents'] = strip_accents self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state) self.do_lower_case = do_lower_case @property def mask_token(self) -> str: """ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. MPNet tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. """ if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): """ Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on MPNet. """ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
class MPNetTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" MPNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). ''' def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='[UNK]', pad_token='<pad>', mask_token='<mask>', tokenize_chinese_chars=True, strip_accents=None, **kwargs): pass @property def mask_token(self) -> str: ''' `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. MPNet tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. ''' pass @mask_token.setter def mask_token(self) -> str: ''' Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on MPNet. 
''' pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs Returns: `list[int]`: List of zeros. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
9
4
18
2
12
4
3
0.85
1
4
0
0
6
2
6
94
173
27
79
37
53
67
43
18
36
9
3
2
19
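A minimal illustrative sketch (not part of the record above): it mirrors the special-token formatting described in the `MPNetTokenizerFast` docstring, using made-up integer ids for `<s>`/`</s>` so it runs without a vocabulary file.

```python
# Sketch of MPNetTokenizerFast.build_inputs_with_special_tokens /
# create_token_type_ids_from_sequences shown above; token ids are illustrative only.
BOS, EOS, SEP, CLS = 0, 2, 2, 0  # assumed ids for <s>, </s>, </s>, <s>

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    output = [BOS] + token_ids_0 + [EOS]
    if token_ids_1 is None:
        return output
    # a second sequence is wrapped with an extra </s> separator
    return output + [EOS] + token_ids_1 + [EOS]

def create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None):
    sep, cls = [SEP], [CLS]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

print(build_inputs_with_special_tokens([7, 8], [9]))     # [0, 7, 8, 2, 2, 9, 2]
print(create_token_type_ids_from_sequences([7, 8], [9])) # all zeros: MPNet ignores token type ids
```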
4,042
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/configuration_mpt.py
transformers.models.mpt.configuration_mpt.MptAttentionConfig
from ...configuration_utils import PretrainedConfig class MptAttentionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate attention layers according to the specified arguments, defining the layers architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPT [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: attn_type (`str`, *optional*, defaults to `"multihead_attention"`): type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`. attn_pdrop (`float`, *optional*, defaults to `0.0`): The dropout probability for the attention layers. attn_impl (`str`, *optional*, defaults to `"torch"`): The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`. clip_qkv (`float`, *optional*): If not `None`, clip the queries, keys, and values in the attention layer to this value. softmax_scale (`float`, *optional*): If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to `1/sqrt(hidden_size)`. prefix_lm (`bool`, *optional*, defaults to `False`): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally. Tokens outside the prefix use causal attention. qk_ln (`bool`, *optional*, defaults to `False`): Whether to apply layer normalization to the queries and keys in the attention layer. attn_uses_sequence_id (`bool`, *optional*, defaults to `False`): Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train` mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored. alibi (`bool`, *optional*, defaults to `True`): Whether or not to use the alibi bias instead of positional embedding. alibi_bias_max (`int`, *optional*, defaults to 8): The maximum value of the alibi bias. """ base_config_key = 'attn_config' def __init__(self, attn_type='multihead_attention', attn_pdrop=0, attn_impl='torch', clip_qkv=None, softmax_scale=None, prefix_lm=False, qk_ln=False, attn_uses_sequence_id=False, alibi=True, alibi_bias_max=8, **kwargs): super().__init__() self.attn_type = attn_type self.attn_pdrop = attn_pdrop self.attn_impl = attn_impl self.clip_qkv = clip_qkv self.softmax_scale = softmax_scale self.prefix_lm = prefix_lm self.attn_uses_sequence_id = attn_uses_sequence_id self.alibi = alibi self.qk_ln = qk_ln self.alibi_bias_max = alibi_bias_max if attn_type not in ['multihead_attention', 'multiquery_attention']: raise ValueError(f'`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}')
class MptAttentionConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate attention layers according to the specified arguments, defining the layers architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPT [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: attn_type (`str`, *optional*, defaults to `"multihead_attention"`): type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`. attn_pdrop (`float`, *optional*, defaults to `0.0`): The dropout probability for the attention layers. attn_impl (`str`, *optional*, defaults to `"torch"`): The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`. clip_qkv (`float`, *optional*): If not `None`, clip the queries, keys, and values in the attention layer to this value. softmax_scale (`float`, *optional*): If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to `1/sqrt(hidden_size)`. prefix_lm (`bool`, *optional*, defaults to `False`): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally. Tokens outside the prefix use causal attention. qk_ln (`bool`, *optional*, defaults to `False`): Whether to apply layer normalization to the queries and keys in the attention layer. attn_uses_sequence_id (`bool`, *optional*, defaults to `False`): Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train` mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored. alibi (`bool`, *optional*, defaults to `True`): Whether or not to use the alibi bias instead of positional embedding. alibi_bias_max (`int`, *optional*, defaults to 8): The maximum value of the alibi bias. ''' def __init__(self, attn_type='multihead_attention', attn_pdrop=0, attn_impl='torch', clip_qkv=None, softmax_scale=None, prefix_lm=False, qk_ln=False, attn_uses_sequence_id=False, alibi=True, alibi_bias_max=8, **kwargs): pass
2
1
30
1
29
0
2
1.13
1
2
0
0
1
10
1
1
71
5
31
26
16
35
16
13
14
2
1
1
2
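A minimal usage sketch for the record above (not part of the original source): it instantiates `MptAttentionConfig` from the module path shown and exercises the `attn_type` validation; the non-default values are illustrative only.

```python
# Sketch: constructing MptAttentionConfig and triggering its attn_type check.
from transformers.models.mpt.configuration_mpt import MptAttentionConfig

attn_cfg = MptAttentionConfig(attn_pdrop=0.1, alibi=True, alibi_bias_max=8)
print(attn_cfg.attn_type)  # "multihead_attention" (the default)

try:
    MptAttentionConfig(attn_type="sliding_window_attention")
except ValueError as err:
    print(err)  # only "multihead_attention" or "multiquery_attention" are accepted
```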
4,043
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/configuration_mpt.py
transformers.models.mpt.configuration_mpt.MptConfig
from typing import Optional, Union from ...configuration_utils import PretrainedConfig class MptConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to the Mpt-7b architecture [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: d_model (`int`, *optional*, defaults to 2048): Dimensionality of the embeddings and hidden states. n_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. n_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. expansion_ratio (`int`, *optional*, defaults to 4): The ratio of the up/down scale in the MLP. max_seq_len (`int`, *optional*, defaults to 2048): The maximum sequence length of the model. vocab_size (`int`, *optional*, defaults to 50368): Vocabulary size of the Mpt model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`MptModel`]. Check [this discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the `vocab_size` has been defined. resid_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability applied to the attention output before combining with residual. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. emb_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability for the embedding layer. learned_pos_emb (`bool`, *optional*, defaults to `True`): Whether to use learned positional embeddings. attn_config (`dict`, *optional*): A dictionary used to configure the model's attention module. init_device (`str`, *optional*, defaults to `"cpu"`): The device to use for parameter initialization. Defined for backward compatibility logit_scale (`float`, *optional*): If not None, scale the logits by this value. no_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in all linear layers. verbose (`int`, *optional*, defaults to 0): The verbosity level to use for logging. Used in the previous versions of MPT models for logging. This argument is deprecated. embedding_fraction (`float`, *optional*, defaults to 1.0): The fraction to scale the gradients of the embedding layer by. norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`): Type of layer norm to use. All MPT models uses the same layer norm implementation. Defined for backward compatibility. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import MptConfig, MptModel >>> # Initializing a Mpt configuration >>> configuration = MptConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MptModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = 'mpt' sub_configs = {'attn_config': MptAttentionConfig} attribute_map = {'num_attention_heads': 'n_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'n_layers'} def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, layer_norm_epsilon: float=1e-05, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: MptAttentionConfig=None, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=True, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, initializer_range=0.02, **kwargs): if attn_config is None: self.attn_config = MptAttentionConfig() elif isinstance(attn_config, dict): self.attn_config = MptAttentionConfig(**attn_config) else: self.attn_config = attn_config self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.expansion_ratio = expansion_ratio self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = emb_pdrop self.learned_pos_emb = learned_pos_emb self.init_device = init_device self.logit_scale = logit_scale self.no_bias = no_bias self.verbose = verbose self.embedding_fraction = embedding_fraction self.norm_type = norm_type self.layer_norm_epsilon = layer_norm_epsilon self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs)
class MptConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to the Mpt-7b architecture [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: d_model (`int`, *optional*, defaults to 2048): Dimensionality of the embeddings and hidden states. n_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. n_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. expansion_ratio (`int`, *optional*, defaults to 4): The ratio of the up/down scale in the MLP. max_seq_len (`int`, *optional*, defaults to 2048): The maximum sequence length of the model. vocab_size (`int`, *optional*, defaults to 50368): Vocabulary size of the Mpt model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`MptModel`]. Check [this discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the `vocab_size` has been defined. resid_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability applied to the attention output before combining with residual. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. emb_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability for the embedding layer. learned_pos_emb (`bool`, *optional*, defaults to `True`): Whether to use learned positional embeddings. attn_config (`dict`, *optional*): A dictionary used to configure the model's attention module. init_device (`str`, *optional*, defaults to `"cpu"`): The device to use for parameter initialization. Defined for backward compatibility logit_scale (`float`, *optional*): If not None, scale the logits by this value. no_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in all linear layers. verbose (`int`, *optional*, defaults to 0): The verbosity level to use for logging. Used in the previous versions of MPT models for logging. This argument is deprecated. embedding_fraction (`float`, *optional*, defaults to 1.0): The fraction to scale the gradients of the embedding layer by. norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`): Type of layer norm to use. All MPT models uses the same layer norm implementation. Defined for backward compatibility. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import MptConfig, MptModel >>> # Initializing a Mpt configuration >>> configuration = MptConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MptModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` ''' def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, layer_norm_epsilon: float=1e-05, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: MptAttentionConfig=None, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=True, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, initializer_range=0.02, **kwargs): pass
2
1
48
0
48
0
3
1.11
1
7
1
0
1
19
1
1
128
10
56
46
32
62
28
24
26
3
1
1
3
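A small complementary sketch to the `Example` block already in the docstring above (not part of the record): it shows the branch in `MptConfig.__init__` that converts a plain `attn_config` dict into an `MptAttentionConfig`, and the `attribute_map` alias for `n_heads`. The tiny dimensions are illustrative only.

```python
# Sketch: MptConfig accepts attn_config as a dict and converts it for you.
from transformers.models.mpt.configuration_mpt import MptAttentionConfig, MptConfig

config = MptConfig(d_model=256, n_heads=4, n_layers=2, attn_config={"attn_pdrop": 0.1, "alibi": True})
print(type(config.attn_config))    # MptAttentionConfig, built from the dict
print(config.num_attention_heads)  # 4, via the attribute_map alias for n_heads
```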
4,044
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptAttention
import math from ...cache_utils import Cache, DynamicCache import torch from torch import nn from typing import Optional, Union from .configuration_mpt import MptConfig from ...utils.deprecation import deprecate_kwarg class MptAttention(nn.Module): """Multi-head self attention. Using torch or triton attention implementation enables user to also use additive bias. """ def __init__(self, config: MptConfig, layer_idx: Optional[int]=None): super().__init__() self.hidden_size = config.hidden_size self.n_heads = config.n_heads self.max_seq_length = config.max_seq_len self.head_dim = self.hidden_size // self.n_heads self.softmax_scale = config.attn_config.softmax_scale if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads) self.attn_dropout_p = config.attn_config.attn_pdrop self.clip_qkv = config.attn_config.clip_qkv self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.layer_idx = layer_idx @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_bias: torch.Tensor, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None): batch_size, seq_length = hidden_states.shape[:2] mixed_qkv = self.Wqkv(hidden_states) if self.clip_qkv: mixed_qkv = mixed_qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv) query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2) query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2) value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: cache_kwargs = {'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale query_length = seq_length if past_key_values is None else seq_length + past_key_values.get_seq_length() if position_bias is not None: if len(position_bias.shape) != 3: raise ValueError(f'Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}') key_length = key_states.shape[-2] position_bias_query_index = max(0, position_bias.size(1) - query_length) position_bias_key_index = max(0, position_bias.size(2) - key_length) position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:] attention_scores = attention_scores + position_bias if attention_mask is not None: attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min) attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training) context_states = torch.matmul(attn_weights, value_states) context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1) attn_output = self.out_proj(context_states) return (attn_output, attn_weights)
class MptAttention(nn.Module): '''Multi-head self attention. Using torch or triton attention implementation enables user to also use additive bias. ''' def __init__(self, config: MptConfig, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_bias: torch.Tensor, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None): pass
4
1
34
7
27
1
5
0.07
1
4
1
0
2
9
2
12
74
16
54
29
45
4
47
23
44
8
1
2
10
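A minimal shape sketch for the attention record above (not part of the original source): it reproduces the default `softmax_scale` and the fused Q/K/V split performed by `MptAttention.forward`, with random tensors standing in for `Wqkv(hidden_states)`. Sizes are illustrative.

```python
# Sketch: default softmax scale and Q/K/V chunking used by MptAttention above.
import math
import torch

batch, seq_len, hidden_size, n_heads = 2, 5, 64, 4
head_dim = hidden_size // n_heads
softmax_scale = 1 / math.sqrt(hidden_size / n_heads)  # default when config leaves it None

mixed_qkv = torch.randn(batch, seq_len, 3 * hidden_size)  # stands in for Wqkv(hidden_states)
q, k, v = mixed_qkv.chunk(3, dim=2)
q = q.reshape(batch, seq_len, n_heads, head_dim).transpose(1, 2)   # (batch, n_heads, seq, head_dim)
k = k.reshape(batch, seq_len, n_heads, head_dim).transpose(1, 2)
scores = torch.matmul(q, k.transpose(-1, -2)) * softmax_scale       # (batch, n_heads, seq, seq)
print(q.shape, scores.shape, round(softmax_scale, 4))
```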
4,045
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptBlock
from ...modeling_layers import GradientCheckpointingLayer from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from .configuration_mpt import MptConfig import torch from ...cache_utils import Cache, DynamicCache from torch import nn from typing import Optional, Union class MptBlock(GradientCheckpointingLayer): def __init__(self, config: MptConfig, layer_idx: Optional[int]=None): super().__init__() hidden_size = config.hidden_size self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.norm_1.bias = None self.num_heads = config.n_heads self.attn = MptAttention(config, layer_idx) self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.norm_2.bias = None self.ffn = MptMLP(config) self.dropout_rate = config.attn_config.attn_pdrop self.resid_attn_dropout = nn.Dropout(self.dropout_rate) def forward(self, hidden_states: torch.Tensor, position_bias: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None): layernorm_output = self.norm_1(hidden_states) residual = hidden_states attn_outputs, attn_weights = self.attn(layernorm_output, position_bias=position_bias, attention_mask=attention_mask, past_key_values=layer_past, cache_position=cache_position) hidden_states = self.resid_attn_dropout(attn_outputs) + residual layernorm_output = self.norm_2(hidden_states) residual = hidden_states output = self.ffn(layernorm_output, residual) return (output, attn_weights)
class MptBlock(GradientCheckpointingLayer): def __init__(self, config: MptConfig, layer_idx: Optional[int]=None): pass def forward(self, hidden_states: torch.Tensor, position_bias: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None): pass
3
0
30
7
20
4
2
0.2
1
6
3
0
2
7
2
12
62
15
40
24
29
8
27
16
24
3
1
1
4
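A small instantiation sketch for the block record above (not part of the original source), assuming `torch` and `transformers` are installed: a tiny, randomly initialized config makes the pre-norm layout (bias-free LayerNorm, attention, residual dropout, second LayerNorm, MLP) cheap to inspect. All sizes are illustrative.

```python
# Sketch: instantiating MptBlock with a tiny config to show its pre-norm layout.
from transformers.models.mpt.configuration_mpt import MptConfig
from transformers.models.mpt.modeling_mpt import MptBlock

tiny = MptConfig(d_model=64, n_heads=4, n_layers=2, max_seq_len=32, vocab_size=128)
block = MptBlock(tiny, layer_idx=0)
print(block.norm_1.bias)  # None: the LayerNorm bias is removed, as in __init__ above
print(block)              # norm_1 -> attn -> resid_attn_dropout -> norm_2 -> ffn
```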
4,046
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptForCausalLM
from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...generation import GenerationMixin from ...cache_utils import Cache, DynamicCache from torch import nn from typing import Optional, Union from .configuration_mpt import MptConfig import torch @auto_docstring(custom_intro='\n The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ') class MptForCausalLM(MptPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: MptConfig): super().__init__(config) self.transformer = MptModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: """ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) loss = self.loss_function(lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
@auto_docstring(custom_intro='\n The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ') class MptForCausalLM(MptPreTrainedModel, GenerationMixin): def __init__(self, config: MptConfig): pass def set_output_embeddings(self, new_embeddings: torch.Tensor): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` ''' pass
6
1
18
1
14
3
2
0.21
2
7
3
0
5
2
5
8
104
12
76
32
50
16
28
16
22
5
2
1
9
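A minimal loss sketch for the causal-LM record above (not part of the original source): as the `labels` docstring notes, the shift happens inside the model, so `labels=input_ids` is enough. A tiny, randomly initialized config stands in for the released mosaicml/mpt-7b weights; its values are illustrative only.

```python
# Sketch: computing a language-modeling loss with a tiny random MptForCausalLM.
import torch
from transformers.models.mpt.configuration_mpt import MptConfig
from transformers.models.mpt.modeling_mpt import MptForCausalLM

tiny = MptConfig(d_model=64, n_heads=4, n_layers=2, max_seq_len=32, vocab_size=128)
model = MptForCausalLM(tiny)

input_ids = torch.randint(0, tiny.vocab_size, (1, 10))
outputs = model(input_ids=input_ids, labels=input_ids)  # labels are shifted internally
print(outputs.loss, outputs.logits.shape)                # scalar loss, (1, 10, 128)
```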
4,047
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptForQuestionAnswering
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from typing import Optional, Union from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from torch import nn from ...utils import auto_docstring, logging import torch @auto_docstring class MptForQuestionAnswering(MptPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = MptModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class MptForQuestionAnswering(MptPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ''' pass
5
1
37
4
27
7
4
0.24
1
4
2
0
2
2
2
5
77
9
55
26
41
13
31
15
28
7
2
2
8
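A minimal sketch of the span-loss bookkeeping in the QA record above (not part of the original source): the head emits two values per token, which are split into start/end logits, and out-of-range positions are clamped to `ignored_index` so they do not contribute to the loss. Shapes and targets are illustrative.

```python
# Sketch: start/end logit split and position clamping as in MptForQuestionAnswering.forward.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len = 2, 6
logits = torch.randn(batch, seq_len, 2)               # stands in for qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()  # (batch, seq_len)
end_logits = end_logits.squeeze(-1).contiguous()

start_positions = torch.tensor([1, 99]).clamp(0, seq_len)  # 99 is out of range -> clamped to ignored_index
end_positions = torch.tensor([3, 99]).clamp(0, seq_len)
loss_fct = CrossEntropyLoss(ignore_index=seq_len)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```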
4,048
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptForSequenceClassification
from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from ...cache_utils import Cache, DynamicCache from typing import Optional, Union import torch from ...utils import auto_docstring, logging from .configuration_mpt import MptConfig from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput @auto_docstring(custom_intro='\n The MPT Model transformer with a sequence classification head on top (linear layer).\n\n [`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ') class MptForSequenceClassification(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = MptModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]: """ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
null
5
1
49
5
41
4
9
0.09
1
8
3
0
2
3
2
5
106
10
88
29
68
8
45
17
42
16
2
3
17
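A minimal sketch of the pooling step in the sequence-classification record above (not part of the original source): with a `pad_token_id` set, the logits of the last non-padding token of each row are used for the sequence-level prediction. `pad_token_id=0` and the tensors are illustrative.

```python
# Sketch: last-non-pad-token pooling as in MptForSequenceClassification.forward.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 1, 2, 3]])
logits = torch.randn(2, 5, 4)  # (batch, seq_len, num_labels)

non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)                                    # tensor([2, 4])
pooled_logits = logits[torch.arange(2), last_non_pad_token]  # (batch, num_labels)
print(pooled_logits.shape)
```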
4,049
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptForTokenClassification
from ...utils import auto_docstring, logging from typing import Optional, Union from ...cache_utils import Cache, DynamicCache from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput import torch from torch import nn from .configuration_mpt import MptConfig @auto_docstring class MptForTokenClassification(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = MptModel(config) if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)) if not return_dict: output = (logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
@auto_docstring class MptForTokenClassification(MptPreTrainedModel): def __init__(self, config: MptConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
36
4
29
4
4
0.13
1
6
3
0
2
4
2
5
80
8
64
28
43
8
28
15
25
5
2
1
8
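A small sketch of the token-level loss in the record above (not part of the original source): logits and labels are flattened to `(batch * seq_len, ...)` before `CrossEntropyLoss`, exactly as in `MptForTokenClassification.forward`. Shapes are illustrative.

```python
# Sketch: flattening logits/labels for the per-token cross-entropy loss.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 4, 3
logits = torch.randn(batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch, seq_len))

loss = CrossEntropyLoss()(logits.view(batch * seq_len, num_labels), labels.view(batch * seq_len))
print(loss)
```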
4,050
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptMLP
from torch import nn from .configuration_mpt import MptConfig from torch.nn import functional as F import torch class MptMLP(nn.Module): def __init__(self, config: MptConfig): super().__init__() hidden_size = config.hidden_size self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False) self.act = nn.GELU(approximate='none') self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False) self.hidden_dropout = config.attn_config.attn_pdrop def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.act(self.up_proj(hidden_states)) intermediate_output = self.down_proj(hidden_states) output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training) output = output + residual return output
class MptMLP(nn.Module): def __init__(self, config: MptConfig): pass def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: pass
3
0
9
2
7
0
1
0
1
3
1
0
2
4
2
12
19
5
14
10
11
0
14
10
11
1
1
0
2
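A minimal shape sketch for the MLP record above (not part of the original source): the up-projection expands to 4x the hidden size, GELU and the down-projection bring it back, and `forward` takes the residual explicitly. The tiny config is illustrative only.

```python
# Sketch: the 4x expansion and explicit residual in MptMLP above.
import torch
from transformers.models.mpt.configuration_mpt import MptConfig
from transformers.models.mpt.modeling_mpt import MptMLP

tiny = MptConfig(d_model=64, n_heads=4, n_layers=2)
mlp = MptMLP(tiny)
hidden = torch.randn(1, 5, 64)
out = mlp(hidden, residual=hidden)           # forward adds the residual after dropout
print(mlp.up_proj.weight.shape, out.shape)   # torch.Size([256, 64]) torch.Size([1, 5, 64])
```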
4,051
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptModel
import torch from typing import Optional, Union from torch import nn from ...utils import auto_docstring, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from .configuration_mpt import MptConfig from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...cache_utils import Cache, DynamicCache @auto_docstring class MptModel(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.hidden_size = config.hidden_size self.num_heads = config.n_heads self.wte = nn.Embedding(config.vocab_size, self.hidden_size) self.blocks = nn.ModuleList([MptBlock(config, layer_idx=i) for i in range(config.n_layers)]) self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon) self.norm_f.bias = None self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.wte def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None): return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device) def set_input_embeddings(self, new_embeddings: torch.Tensor): self.wte = new_embeddings @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: """ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError('You have to specify either input_ids or inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `DynamicCache` instead, e.g. `past_key_values=DynamicCache.from_legacy_cache(past_key_values)`.') past_key_values = DynamicCache.from_legacy_cache(past_key_values) hidden_states = inputs_embeds all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 seq_length_with_past = seq_length + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device) causal_mask = _prepare_4d_causal_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length) causal_mask = causal_mask.bool() for block in self.blocks: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block(hidden_states, layer_past=past_key_values, attention_mask=causal_mask, use_cache=use_cache, output_attentions=output_attentions, position_bias=alibi, cache_position=cache_position) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions)
@auto_docstring class MptModel(MptPreTrainedModel): def __init__(self, config: MptConfig): pass def get_input_embeddings(self): pass def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None): pass def set_input_embeddings(self, new_embeddings: torch.Tensor): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ''' pass
8
1
28
5
22
2
6
0.07
1
10
3
0
5
6
5
8
149
27
115
36
92
8
67
24
61
25
2
2
29
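A minimal forward-pass sketch for the backbone record above (not part of the original source): a tiny, randomly initialized `MptModel` is run once; ALiBi biases are built internally (no position ids are passed), and with `use_cache=True` the returned `past_key_values` is a cache object holding the processed positions. Config values are illustrative.

```python
# Sketch: one forward pass through a tiny random MptModel with caching enabled.
import torch
from transformers.models.mpt.configuration_mpt import MptConfig
from transformers.models.mpt.modeling_mpt import MptModel

tiny = MptConfig(d_model=64, n_heads=4, n_layers=2, max_seq_len=32, vocab_size=128)
model = MptModel(tiny)

input_ids = torch.randint(0, tiny.vocab_size, (1, 8))
out = model(input_ids=input_ids, use_cache=True)
print(out.last_hidden_state.shape)           # (1, 8, 64)
print(out.past_key_values.get_seq_length())  # 8 cached positions
```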
4,052
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mpt/modeling_mpt.py
transformers.models.mpt.modeling_mpt.MptPreTrainedModel
from torch import nn from .configuration_mpt import MptConfig import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from ...utils import auto_docstring, logging from ...modeling_utils import PreTrainedModel from ...utils.deprecation import deprecate_kwarg @auto_docstring class MptPreTrainedModel(PreTrainedModel): config: MptConfig base_model_prefix = 'transformer' supports_gradient_checkpointing = True _no_split_modules = ['MptBlock'] _keys_to_ignore_on_load_missing = ['lm_head.*.'] def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): if module.bias is not None: module.bias.data.zero_() module.weight.data.fill_(1.0) @staticmethod @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def _convert_to_mpt_cache(past_key_values: tuple[tuple[torch.Tensor, torch.Tensor]]) -> tuple[tuple[torch.Tensor, torch.Tensor]]: """ Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...])) """ batch_size, num_heads, head_dim, seq_length = past_key_values[0][0].shape batch_size_times_num_heads = batch_size * num_heads return tuple(((layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim)) for layer_past in past_key_values))
@auto_docstring class MptPreTrainedModel(PreTrainedModel): def __init__(self, *inputs, **kwargs): pass def _init_weights(self, module: nn.Module): '''Initialize the weights.''' pass @staticmethod @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def _convert_to_mpt_cache(past_key_values: tuple[tuple[torch.Tensor, torch.Tensor]]) -> tuple[tuple[torch.Tensor, torch.Tensor]]: ''' Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...])) ''' pass
7
2
12
0
9
3
3
0.24
1
3
0
5
2
0
3
3
45
3
34
14
27
8
23
11
19
7
1
2
9
4,053
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/configuration_mra.py
transformers.models.mra.configuration_mra.MraConfig
from ...configuration_utils import PretrainedConfig class MraConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mra [uw-madison/mra-base-512-4](https://huggingface.co/uw-madison/mra-base-512-4) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Mra model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MraModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 1): The vocabulary size of the `token_type_ids` passed when calling [`MraModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. block_per_row (`int`, *optional*, defaults to 4): Used to set the budget for the high resolution scale. approx_mode (`str`, *optional*, defaults to `"full"`): Controls whether both low and high resolution approximations are used. Set to `"full"` for both low and high resolution and `"sparse"` for only low resolution. initial_prior_first_n_blocks (`int`, *optional*, defaults to 0): The initial number of blocks for which high resolution is used. initial_prior_diagonal_n_blocks (`int`, *optional*, defaults to 0): The number of diagonal blocks for which high resolution is used. 
Example: ```python >>> from transformers import MraConfig, MraModel >>> # Initializing a Mra uw-madison/mra-base-512-4 style configuration >>> configuration = MraConfig() >>> # Initializing a model (with random weights) from the uw-madison/mra-base-512-4 style configuration >>> model = MraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'mra' def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, position_embedding_type='absolute', block_per_row=4, approx_mode='full', initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.block_per_row = block_per_row self.approx_mode = approx_mode self.initial_prior_first_n_blocks = initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
class MraConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mra [uw-madison/mra-base-512-4](https://huggingface.co/uw-madison/mra-base-512-4) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Mra model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MraModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 1): The vocabulary size of the `token_type_ids` passed when calling [`MraModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. block_per_row (`int`, *optional*, defaults to 4): Used to set the budget for the high resolution scale. approx_mode (`str`, *optional*, defaults to `"full"`): Controls whether both low and high resolution approximations are used. Set to `"full"` for both low and high resolution and `"sparse"` for only low resolution. initial_prior_first_n_blocks (`int`, *optional*, defaults to 0): The initial number of blocks for which high resolution is used. initial_prior_diagonal_n_blocks (`int`, *optional*, defaults to 0): The number of diagonal blocks for which high resolution is used. 
Example: ```python >>> from transformers import MraConfig, MraModel >>> # Initializing a Mra uw-madison/mra-base-512-4 style configuration >>> configuration = MraConfig() >>> # Initializing a model (with random weights) from the uw-madison/mra-base-512-4 style configuration >>> model = MraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, position_embedding_type='absolute', block_per_row=4, approx_mode='full', initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): pass
2
1
43
1
42
0
1
1.27
1
1
0
0
1
17
1
1
111
11
44
43
19
56
21
20
19
1
1
0
1
4,054
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraAttention
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from torch import nn class MraAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = MraSelfAttention(config, position_embedding_type=position_embedding_type) self.output = MraSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None): self_outputs = self.self(hidden_states, attention_mask) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class MraAttention(nn.Module): def __init__(self, config, position_embedding_type=None): pass def prune_heads(self, heads): pass def forward(self, hidden_states, attention_mask=None): pass
4
0
9
1
8
1
1
0.13
1
4
2
0
3
3
3
13
30
4
24
11
20
3
22
11
18
2
1
1
4
4,055
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraClassificationHead
from ...activations import ACT2FN from torch import nn class MraClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x
class MraClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, config): pass def forward(self, features, **kwargs): pass
3
1
8
1
7
1
1
0.13
1
1
0
0
2
4
2
12
19
3
15
8
12
2
15
8
12
1
1
0
2
4,056
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraEmbeddings
import torch from torch import nn class MraEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
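One detail worth calling out in `MraEmbeddings`: position ids start at 2 (the registered buffer is `arange(...) + 2`), so the position table is sized `max_position_embeddings + 2`. A small sketch of that convention with made-up sizes:

```python
import torch
import torch.nn as nn

# Illustrative sizes (not from a real configuration).
max_position_embeddings, hidden_size = 8, 4

# The table reserves two extra rows because the indices are shifted by 2.
position_embeddings = nn.Embedding(max_position_embeddings + 2, hidden_size)
position_ids = torch.arange(max_position_embeddings).expand((1, -1)) + 2

print(position_ids)                              # tensor([[2, 3, 4, 5, 6, 7, 8, 9]])
print(position_embeddings(position_ids).shape)   # torch.Size([1, 8, 4])
```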
class MraEmbeddings(nn.Module): '''Construct the embeddings from word, position and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): pass
3
1
26
4
20
3
4
0.17
1
1
0
0
2
6
2
12
56
9
40
16
37
7
34
16
31
7
1
2
8
4,057
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraEncoder
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn class MraEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([MraLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class MraEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, head_mask=None, output_hidden_states=False, return_dict=True): pass
3
0
20
3
17
0
4
0
1
6
2
0
2
3
2
12
41
6
35
16
25
0
20
9
17
7
1
2
8
4,058
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraForMaskedLM
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from typing import Optional, Union from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging @auto_docstring class MraForMaskedLM(MraPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias'] def __init__(self, config): super().__init__(config) self.mra = MraModel(config) self.cls = MraOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class MraForMaskedLM(MraPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. ''' pass
7
1
16
2
12
2
2
0.14
1
6
3
0
4
2
4
5
74
11
56
26
34
8
25
14
20
5
2
1
8
4,059
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraForMultipleChoice
import torch from typing import Optional, Union from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn @auto_docstring class MraForMultipleChoice(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.mra = MraModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.mra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_state = outputs[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = nn.ReLU()(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
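`MraForMultipleChoice` folds the choice dimension into the batch before the encoder and reshapes the per-example scores back into `(batch, num_choices)` at the end. A minimal sketch of just that flatten/unflatten step, with toy sizes and random logits standing in for the encoder and classifier:

```python
import torch

batch, num_choices, seq_len = 2, 4, 6
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

# Fold choices into the batch dimension before running the encoder.
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
print(flat_input_ids.shape)       # torch.Size([8, 6])

# One score per flattened example (random stand-in for encoder + classifier output).
logits = torch.randn(batch * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices)
print(reshaped_logits.shape)      # torch.Size([2, 4])
```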
@auto_docstring class MraForMultipleChoice(MraPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
5
1
37
5
29
6
6
0.17
1
5
2
0
2
3
2
3
82
10
65
27
45
11
30
15
27
11
2
1
12
4,060
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraForQuestionAnswering
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch from typing import Optional, Union from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging @auto_docstring class MraForQuestionAnswering(MraPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.mra = MraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
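The span loss in `MraForQuestionAnswering` clamps the start/end targets to the sequence length, lets `CrossEntropyLoss` ignore those clamped positions, and averages the two losses. A hedged sketch of that computation with random logits and toy targets (one of which is deliberately out of range):

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len = 2, 10
logits = torch.randn(batch, seq_len, 2)              # stand-in for qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)

start_positions = torch.tensor([3, 50]).clamp(0, seq_len)   # 50 is clamped to seq_len
end_positions = torch.tensor([5, 50]).clamp(0, seq_len)

loss_fct = CrossEntropyLoss(ignore_index=seq_len)    # clamped targets are ignored
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```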
@auto_docstring class MraForQuestionAnswering(MraPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: pass
5
0
41
5
30
7
4
0.2
1
5
2
0
2
3
2
3
90
11
66
29
45
13
33
16
30
7
2
2
8
4,061
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraForSequenceClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from typing import Optional, Union from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging @auto_docstring(custom_intro='\n MRA Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.\n ') class MraForSequenceClassification(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mra = MraModel(config) self.classifier = MraClassificationHead(config) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
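The loss dispatch in `MraForSequenceClassification` follows the usual three-way `problem_type` split: MSE for regression, cross-entropy for single-label classification, and BCE-with-logits for multi-label classification. A compact sketch of the three branches on toy tensors:

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

batch, num_labels = 4, 3
logits = torch.randn(batch, num_labels)

# regression: float targets with the same shape as the logits
mse = MSELoss()(logits, torch.randn(batch, num_labels))

# single-label classification: one integer class index per example
ce = CrossEntropyLoss()(logits.view(-1, num_labels), torch.randint(0, num_labels, (batch,)))

# multi-label classification: independent 0/1 targets per label
bce = BCEWithLogitsLoss()(logits, torch.randint(0, 2, (batch, num_labels)).float())

print(mse, ce, bce)
```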
@auto_docstring(custom_intro='\n MRA Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.\n ') class MraForSequenceClassification(MraPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
37
3
31
4
7
0.1
1
7
3
0
2
3
2
3
82
7
68
24
48
7
32
12
29
12
2
3
13
4,062
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraForTokenClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch from typing import Optional, Union from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging @auto_docstring class MraForTokenClassification(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mra = MraModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where(active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
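When an attention mask is given, `MraForTokenClassification` replaces the labels of padded positions with the loss function's `ignore_index` so padding does not contribute to the cross-entropy. A small sketch of that masking on toy tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 5, 3
logits = torch.randn(batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch, seq_len))
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])

loss_fct = CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_labels = torch.where(
    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(logits.view(-1, num_labels), active_labels)
print(loss)
```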
@auto_docstring class MraForTokenClassification(MraPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
5
1
34
4
27
3
4
0.1
1
5
2
0
2
4
2
3
76
9
61
28
41
6
27
16
24
6
2
2
7
4,063
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraIntermediate
import torch from torch import nn from ...activations import ACT2FN class MraIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class MraIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
4,064
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraLMPredictionHead
import torch from torch import nn class MraLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MraPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class MraLMPredictionHead(nn.Module): def __init__(self, config): pass def _tie_weights(self): pass def forward(self, hidden_states): pass
4
0
6
1
4
1
1
0.23
1
2
1
0
3
3
3
13
21
5
13
7
9
3
13
7
9
1
1
0
3
4,065
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraLayer
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...modeling_layers import GradientCheckpointingLayer class MraLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = MraAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = MraIntermediate(config) self.output = MraOutput(config) def forward(self, hidden_states, attention_mask=None): self_attention_outputs = self.attention(hidden_states, attention_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
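`MraLayer` runs its feed-forward through `apply_chunking_to_forward` over the sequence dimension (`seq_len_dim = 1`), which only reduces peak memory because the feed-forward acts per position. A toy illustration of that equivalence, using a stand-in elementwise function rather than the real intermediate/output sub-layers:

```python
import torch

def feed_forward(x):
    # Stand-in for the intermediate + output sub-layers; acts independently per position.
    return torch.relu(x) * 2.0

attention_output = torch.randn(2, 8, 4)   # (batch, seq_len, hidden)
chunk_size = 4

full = feed_forward(attention_output)
chunked = torch.cat(
    [feed_forward(chunk) for chunk in attention_output.split(chunk_size, dim=1)], dim=1
)
print(torch.equal(full, chunked))          # True: chunking does not change the result
```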
class MraLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None): pass def feed_forward_chunk(self, attention_output): pass
4
0
8
1
7
0
1
0.05
1
4
3
0
3
6
3
13
27
5
22
16
18
1
20
16
16
1
1
0
3
4,066
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraModel
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from typing import Optional, Union from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging @auto_docstring class MraModel(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = MraEmbeddings(config) self.encoder = MraEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCrossAttentions]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCrossAttentions(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
@auto_docstring class MraModel(MraPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCrossAttentions]: pass
8
1
19
2
14
2
3
0.15
1
7
3
0
5
3
5
6
106
15
79
30
57
12
41
19
35
11
2
2
16
4,067
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraOnlyMLMHead
import torch from torch import nn class MraOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MraLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores
class MraOnlyMLMHead(nn.Module): def __init__(self, config): pass def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: pass
3
0
3
0
3
0
1
0
1
3
1
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
4,068
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraOutput
import torch from torch import nn class MraOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class MraOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
4,069
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraPreTrainedModel
from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging from ...modeling_utils import PreTrainedModel from .configuration_mra import MraConfig from torch import nn @auto_docstring class MraPreTrainedModel(PreTrainedModel): config: MraConfig base_model_prefix = 'mra' supports_gradient_checkpointing = True def _init_weights(self, module: nn.Module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MraLMPredictionHead): module.bias.data.zero_()
@auto_docstring class MraPreTrainedModel(PreTrainedModel): def _init_weights(self, module: nn.Module): '''Initialize the weights''' pass
3
1
15
0
12
3
6
0.44
1
0
0
6
1
0
1
1
25
2
16
5
14
7
14
5
12
6
1
2
6
4,070
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraPredictionHeadTransform
from ...activations import ACT2FN import torch from torch import nn class MraPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class MraPredictionHeadTransform(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
7
0
7
0
2
0
1
3
0
0
2
3
2
12
15
1
14
6
11
0
13
6
10
2
1
1
3
4,071
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraReduceSum
import torch class MraReduceSum: @staticmethod def operator_call(sparse_query, indices, query_num_block, key_num_block): batch_size, num_block, block_size, _ = sparse_query.size() if len(sparse_query.size()) != 4: raise ValueError('sparse_query must be a 4-dimensional tensor.') if len(indices.size()) != 2: raise ValueError('indices must be a 2-dimensional tensor.') _, _, block_size, _ = sparse_query.size() batch_size, num_block = indices.size() sparse_query = sparse_query.sum(dim=2).reshape(batch_size * num_block, block_size) batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device) global_idxes = (torch.div(indices, key_num_block, rounding_mode='floor').long() + batch_idx[:, None] * query_num_block).reshape(batch_size * num_block) temp = torch.zeros((batch_size * query_num_block, block_size), dtype=sparse_query.dtype, device=sparse_query.device) output = temp.index_add(0, global_idxes, sparse_query).reshape(batch_size, query_num_block, block_size) output = output.reshape(batch_size, query_num_block * block_size) return output
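`MraReduceSum.operator_call` is built around `Tensor.index_add`, which accumulates rows of a source tensor into selected rows of a destination (here, summed sparse blocks scattered into their query-block rows). A minimal sketch of that primitive with made-up indices:

```python
import torch

dest = torch.zeros(4, 3)                # e.g. one row per query block
src = torch.ones(5, 3)                  # e.g. one row per retained sparse block
rows = torch.tensor([0, 0, 2, 3, 3])    # destination row for each source row

out = dest.index_add(0, rows, src)
print(out)   # rows 0 and 3 each accumulate two source rows, row 2 one, row 1 none
```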
class MraReduceSum: @staticmethod def operator_call(sparse_query, indices, query_num_block, key_num_block): pass
3
0
25
6
19
0
3
0
0
1
0
0
0
0
1
1
27
6
21
8
18
0
16
7
14
3
0
1
3
4,072
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraSampledDenseMatMul
import torch class MraSampledDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, dense_query, dense_key, indices, block_size): sparse_qk_prod = mm_to_sparse(dense_query, dense_key, indices, block_size) ctx.save_for_backward(dense_query, dense_key, indices) ctx.block_size = block_size return sparse_qk_prod @staticmethod def backward(ctx, grad): dense_query, dense_key, indices = ctx.saved_tensors block_size = ctx.block_size query_num_block = dense_query.size(1) // block_size key_num_block = dense_key.size(1) // block_size indices_T = transpose_indices(indices, query_num_block, key_num_block) grad_key = sparse_dense_mm(grad.transpose(-1, -2), indices_T, dense_query, key_num_block) grad_query = sparse_dense_mm(grad, indices, dense_key, query_num_block) return (grad_query, grad_key, None, None) @staticmethod def operator_call(dense_query, dense_key, indices, block_size=32): return MraSampledDenseMatMul.apply(dense_query, dense_key, indices, block_size)
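`MraSampledDenseMatMul` follows the standard `torch.autograd.Function` recipe: `forward` stashes what the backward pass needs on `ctx`, and `backward` returns one gradient (or `None`) per forward input. The toy op below shows only that recipe with a plain scaled matmul; it is not the block-sparse kernel itself:

```python
import torch

class ScaledMatMul(torch.autograd.Function):
    @staticmethod
    def forward(ctx, a, b, scale):
        ctx.save_for_backward(a, b)
        ctx.scale = scale                    # non-tensor state is stored on ctx directly
        return scale * (a @ b)

    @staticmethod
    def backward(ctx, grad):
        a, b = ctx.saved_tensors
        grad_a = ctx.scale * grad @ b.transpose(-1, -2)
        grad_b = ctx.scale * a.transpose(-1, -2) @ grad
        return grad_a, grad_b, None          # no gradient for the Python float `scale`

a = torch.randn(2, 3, requires_grad=True)
b = torch.randn(3, 4, requires_grad=True)
ScaledMatMul.apply(a, b, 0.5).sum().backward()
print(a.grad.shape, b.grad.shape)            # torch.Size([2, 3]) torch.Size([3, 4])
```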
class MraSampledDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, dense_query, dense_key, indices, block_size): pass @staticmethod def backward(ctx, grad): pass @staticmethod def operator_call(dense_query, dense_key, indices, block_size=32): pass
7
0
5
0
5
0
1
0
1
0
0
0
0
0
3
33
22
2
20
15
13
0
17
12
13
1
5
0
3
4,073
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraSelfAttention
from ...utils import auto_docstring, is_cuda_platform, is_ninja_available, is_torch_cuda_available, logging import torch from torch import nn class MraSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') kernel_loaded = mra_cuda_kernel is not None if is_torch_cuda_available() and is_cuda_platform() and is_ninja_available() and (not kernel_loaded): try: load_cuda_kernels() except Exception as e: logger.warning(f'Could not load the custom kernel for multi-scale deformable attention: {e}') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type if position_embedding_type is not None else config.position_embedding_type self.num_block = config.max_position_embeddings // 32 * config.block_per_row self.num_block = min(self.num_block, int((config.max_position_embeddings // 32) ** 2)) self.approx_mode = config.approx_mode self.initial_prior_first_n_blocks = config.initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = config.initial_prior_diagonal_n_blocks def forward(self, hidden_states, attention_mask=None): batch_size, seq_len, _ = hidden_states.shape query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = attention_mask.squeeze().repeat(1, self.num_attention_heads, 1).reshape(batch_size * self.num_attention_heads, seq_len).int() gpu_warp_size = 32 if self.attention_head_size < gpu_warp_size: pad_size = (batch_size, self.num_attention_heads, seq_len, gpu_warp_size - self.attention_head_size) query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1) key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1) value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1) context_layer = mra2_attention(query_layer.float(), key_layer.float(), value_layer.float(), attention_mask.float(), self.num_block, approx_mode=self.approx_mode, initial_prior_first_n_blocks=self.initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks=self.initial_prior_diagonal_n_blocks) if self.attention_head_size < gpu_warp_size: context_layer = context_layer[:, :, :, :self.attention_head_size] context_layer = context_layer.reshape(batch_size, self.num_attention_heads, seq_len, self.attention_head_size) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = 
context_layer.view(*new_context_layer_shape) outputs = (context_layer,) return outputs
class MraSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): pass def forward(self, hidden_states, attention_mask=None): pass
3
0
29
6
22
1
3
0.04
1
4
0
0
3
12
3
13
90
20
67
29
63
3
51
28
47
5
1
2
9
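MraSelfAttention.forward above uses the usual multi-head reshape before handing the projections to the fused kernel, zero-padding the head dimension up to the 32-wide GPU warp size when heads are narrower. A minimal illustrative sketch of just that reshape and padding, with hypothetical sizes and plain PyTorch (no custom kernel):

```python
import torch
from torch import nn

# Hypothetical sizes, chosen so the warp-size padding branch is exercised.
batch_size, seq_len, hidden_size, num_heads = 2, 64, 128, 8
head_size = hidden_size // num_heads  # 16

proj = nn.Linear(hidden_size, hidden_size)
hidden_states = torch.randn(batch_size, seq_len, hidden_size)

# (batch, seq, hidden) -> (batch, heads, seq, head_size), as in forward()
layer = proj(hidden_states).view(batch_size, -1, num_heads, head_size).transpose(1, 2)

# The MRA kernel works on 32-wide blocks, so narrow heads are zero-padded.
gpu_warp_size = 32
if head_size < gpu_warp_size:
    pad = torch.zeros(batch_size, num_heads, seq_len, gpu_warp_size - head_size)
    layer = torch.cat([layer, pad], dim=-1)

print(layer.shape)  # torch.Size([2, 8, 64, 32])
```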
4074
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraSelfOutput
import torch from torch import nn class MraSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class MraSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
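MraSelfOutput above is the standard post-attention output block: a dense projection and dropout, with the residual added before LayerNorm. A tiny standalone sketch of that ordering, with illustrative sizes:

```python
import torch
from torch import nn

hidden_size = 256  # illustrative; the model takes this from config.hidden_size
dense = nn.Linear(hidden_size, hidden_size)
layer_norm = nn.LayerNorm(hidden_size, eps=1e-12)
dropout = nn.Dropout(p=0.1)

attention_output = torch.randn(2, 64, hidden_size)  # sub-layer output
input_tensor = torch.randn(2, 64, hidden_size)      # residual branch

# Same order as MraSelfOutput.forward: project, drop, add residual, normalize.
out = layer_norm(dropout(dense(attention_output)) + input_tensor)
print(out.shape)  # torch.Size([2, 64, 256])
```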
4075
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mra/modeling_mra.py
transformers.models.mra.modeling_mra.MraSparseDenseMatMul
import torch class MraSparseDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, sparse_query, indices, dense_key, query_num_block): sparse_qk_prod = sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) ctx.save_for_backward(sparse_query, indices, dense_key) ctx.query_num_block = query_num_block return sparse_qk_prod @staticmethod def backward(ctx, grad): sparse_query, indices, dense_key = ctx.saved_tensors query_num_block = ctx.query_num_block key_num_block = dense_key.size(1) // sparse_query.size(-1) indices_T = transpose_indices(indices, query_num_block, key_num_block) grad_key = sparse_dense_mm(sparse_query.transpose(-1, -2), indices_T, grad, key_num_block) grad_query = mm_to_sparse(grad, dense_key, indices) return (grad_query, None, grad_key, None) @staticmethod def operator_call(sparse_query, indices, dense_key, query_num_block): return MraSparseDenseMatMul.apply(sparse_query, indices, dense_key, query_num_block)
class MraSparseDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, sparse_query, indices, dense_key, query_num_block): pass @staticmethod def backward(ctx, grad): pass @staticmethod def operator_call(sparse_query, indices, dense_key, query_num_block): pass
7
0
5
0
5
0
1
0
1
0
0
0
0
0
3
33
21
2
19
14
12
0
16
11
12
1
5
0
3
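MraSparseDenseMatMul above follows the standard `torch.autograd.Function` pattern: `forward` stashes its inputs with `ctx.save_for_backward`, and `backward` returns one gradient per `forward` argument, with `None` for the non-differentiable ones. A generic, self-contained sketch of that pattern (a toy scaled matmul, not the MRA sparse kernels):

```python
import torch


class ScaledMatMul(torch.autograd.Function):
    """Toy autograd.Function illustrating the save_for_backward pattern."""

    @staticmethod
    def forward(ctx, a, b, scale):
        ctx.save_for_backward(a, b)
        ctx.scale = scale                 # non-tensor state is kept on ctx
        return scale * (a @ b)

    @staticmethod
    def backward(ctx, grad_out):
        a, b = ctx.saved_tensors
        grad_a = ctx.scale * grad_out @ b.transpose(-1, -2)
        grad_b = ctx.scale * a.transpose(-1, -2) @ grad_out
        return grad_a, grad_b, None       # one entry per forward() argument


a = torch.randn(4, 8, requires_grad=True)
b = torch.randn(8, 3, requires_grad=True)
ScaledMatMul.apply(a, b, 0.5).sum().backward()
print(a.grad.shape, b.grad.shape)  # torch.Size([4, 8]) torch.Size([8, 3])
```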
4076
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/configuration_mt5.py
transformers.models.mt5.configuration_mt5.MT5Config
from ...configuration_utils import PretrainedConfig class MT5Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 [google/mt5-small](https://huggingface.co/google/mt5-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 250112): Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. In the conventional context, it is typically expected that `d_kv` has to be equal to `d_model // num_heads`. But in the architecture of mt5-small, `d_kv` is not equal to `d_model //num_heads`. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `T5Block`. num_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" model_type = 'mt5' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', 'head_dim': 'd_kv'} def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, tokenizer_class='T5Tokenizer', tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, classifier_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.classifier_dropout = classifier_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache act_info = self.feed_forward_proj.split('-') self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == 'gated' if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2: raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'") if feed_forward_proj == 'gated-gelu': self.dense_act_fn = 'gelu_new' super().__init__(is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
class MT5Config(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 [google/mt5-small](https://huggingface.co/google/mt5-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 250112): Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. In the conventional context, it is typically expected that `d_kv` has to be equal to `d_model // num_heads`. But in the architecture of mt5-small, `d_kv` is not equal to `d_model //num_heads`. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `T5Block`. num_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). ''' def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, tokenizer_class='T5Tokenizer', tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, classifier_dropout=0.0, **kwargs): pass
2
1
67
4
62
2
4
0.62
1
2
0
0
1
17
1
1
122
8
71
47
45
44
28
23
26
4
1
1
4
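MT5Config.__init__ above derives `dense_act_fn` and `is_gated_act` from the `feed_forward_proj` string and rejects malformed values. A short sketch of that behaviour, assuming the class is imported from transformers in the usual way:

```python
from transformers import MT5Config

# Default feed_forward_proj="gated-gelu" is split into its two parts.
config = MT5Config()
print(config.is_gated_act)   # True
print(config.dense_act_fn)   # "gelu_new" (special-cased for "gated-gelu")

# A plain activation string is accepted as well...
relu_config = MT5Config(feed_forward_proj="relu")
print(relu_config.is_gated_act, relu_config.dense_act_fn)  # False relu

# ...but anything else raises a ValueError.
try:
    MT5Config(feed_forward_proj="gated-gelu-extra")
except ValueError as err:
    print(err)
```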
4077
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/configuration_mt5.py
transformers.models.mt5.configuration_mt5.MT5OnnxConfig
from collections.abc import Mapping from ...onnx import OnnxSeq2SeqConfigWithPast class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}} if self.use_past: common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence' common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') return common_inputs @property def default_onnx_opset(self) -> int: return 13 @property def atol_for_validation(self) -> float: return 0.0005
class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def default_onnx_opset(self) -> int: pass @property def atol_for_validation(self) -> float: pass
7
0
7
1
6
0
2
0.09
1
3
0
0
3
0
3
3
29
4
23
8
16
2
16
5
12
3
1
1
5
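MT5OnnxConfig.inputs above declares which axes stay dynamic during ONNX export. A small sketch that only inspects those properties, assuming the constructor inherited from OnnxSeq2SeqConfigWithPast (a model config plus an optional task):

```python
from transformers import MT5Config
from transformers.models.mt5.configuration_mt5 import MT5OnnxConfig

model_config = MT5Config()
onnx_config = MT5OnnxConfig(model_config)  # use_past defaults to False

for name, axes in onnx_config.inputs.items():
    print(name, axes)  # e.g. input_ids {0: 'batch', 1: 'encoder_sequence'}

print(onnx_config.default_onnx_opset)   # 13
print(onnx_config.atol_for_validation)  # 0.0005
```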
4078
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5Attention
from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils.deprecation import deprecate_kwarg from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from typing import Optional, Union from .configuration_mt5 import MT5Config import math class MT5Attention(nn.Module): def __init__(self, config: MT5Config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) return values @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" batch_size, seq_length = hidden_states.shape[:2] is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) is_updated = False if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, :key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
class MT5Attention(nn.Module): def __init__(self, config: MT5Config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def prune_heads(self, heads): pass @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, query_length, key_length, device=None, cache_position=None): '''Compute binned relative position bias''' pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): ''' Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). ''' pass
8
3
44
5
32
8
5
0.26
1
5
1
0
4
17
5
15
226
28
160
67
136
42
113
49
107
16
1
3
26
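MT5Attention._relative_position_bucket above maps signed token distances to a fixed number of buckets: distances below `max_exact` each get their own bucket, larger ones share logarithmically spaced buckets, and in the bidirectional case positive offsets use the upper half of the range. Since it is a plain staticmethod, it can be exercised directly; a small sketch:

```python
import torch
from transformers.models.mt5.modeling_mt5 import MT5Attention

# memory_position - query_position for a handful of signed distances
relative_position = torch.tensor([[-64, -8, -1, 0, 1, 8, 64]])

buckets = MT5Attention._relative_position_bucket(
    relative_position, bidirectional=True, num_buckets=32, max_distance=128
)
print(buckets)  # tensor([[14,  8,  1,  0, 17, 24, 30]])
```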
4079
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5Block
from torch import nn import torch from ...utils.deprecation import deprecate_kwarg from typing import Optional, Union from ...modeling_layers import GradientCheckpointingLayer class MT5Block(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(MT5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)) if self.is_decoder: self.layer.append(MT5LayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(MT5LayerFF(config)) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions) hidden_states = cross_attention_outputs[0] if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) attention_outputs = attention_outputs + cross_attention_outputs[1:] hidden_states = self.layer[-1](hidden_states) if hidden_states.dtype == torch.float16: clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return outputs + attention_outputs
class MT5Block(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): pass
4
0
48
5
41
4
4
0.09
1
5
3
0
2
2
2
12
98
11
82
26
64
7
33
11
30
6
1
2
8
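MT5Block above clamps each sub-layer output when running in float16, pulling values just below the fp16 maximum only if an inf has appeared, so overflow does not turn into NaNs downstream. A standalone sketch of that clamp on a toy tensor:

```python
import torch

hidden_states = torch.tensor([1.0, -2.0, float("inf")], dtype=torch.float16)

if hidden_states.dtype == torch.float16:
    clamp_value = torch.where(
        torch.isinf(hidden_states).any(),
        torch.finfo(hidden_states.dtype).max - 1000,
        torch.finfo(hidden_states.dtype).max,
    )
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

print(hidden_states)  # the inf entry is now clamped just below the fp16 maximum
```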
4080
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5ClassificationHead
from torch import nn from .configuration_mt5 import MT5Config import torch class MT5ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config: MT5Config): super().__init__() self.dense = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.classifier_dropout) self.out_proj = nn.Linear(config.d_model, config.num_labels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states
class MT5ClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, config: MT5Config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
1
6
0
6
0
1
0.08
1
3
1
0
2
3
2
12
16
2
13
6
10
1
13
6
10
1
1
0
2
4081
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5DenseActDense
import torch from ...activations import ACT2FN from torch import nn from .configuration_mt5 import MT5Config class MT5DenseActDense(nn.Module): def __init__(self, config: MT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class MT5DenseActDense(nn.Module): def __init__(self, config: MT5Config): pass def forward(self, hidden_states): pass
3
0
9
0
9
0
2
0
1
3
1
0
2
4
2
12
20
1
19
7
16
0
15
7
12
2
1
1
3
4082
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5DenseGatedActDense
from torch import nn from .configuration_mt5 import MT5Config from ...activations import ACT2FN import torch class MT5DenseGatedActDense(nn.Module): def __init__(self, config: MT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
class MT5DenseGatedActDense(nn.Module): def __init__(self, config: MT5Config): pass def forward(self, hidden_states): pass
3
0
13
1
10
2
2
0.14
1
3
1
0
2
5
2
12
27
3
21
10
18
3
17
10
14
2
1
1
3
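MT5DenseGatedActDense above is the gated feed-forward selected by `feed_forward_proj="gated-gelu"`: two parallel up-projections, one run through the activation and multiplied element-wise with the other before the down-projection. A compact functional sketch with mt5-small-like sizes, where nn.GELU with the tanh approximation stands in for ACT2FN["gelu_new"]:

```python
import torch
from torch import nn

d_model, d_ff = 512, 1024  # illustrative, roughly mt5-small

wi_0 = nn.Linear(d_model, d_ff, bias=False)   # gate branch (activated)
wi_1 = nn.Linear(d_model, d_ff, bias=False)   # linear branch
wo = nn.Linear(d_ff, d_model, bias=False)     # down-projection
act = nn.GELU(approximate="tanh")             # stand-in for ACT2FN["gelu_new"]

hidden_states = torch.randn(2, 7, d_model)
out = wo(act(wi_0(hidden_states)) * wi_1(hidden_states))
print(out.shape)  # torch.Size([2, 7, 512])
```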
4083
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5EncoderModel
from torch import nn from ...utils.model_parallel_utils import assert_device_map, get_device_map from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union import warnings from .configuration_mt5 import MT5Config @auto_docstring class MT5EncoderModel(MT5PreTrainedModel): """ Examples: ```python >>> from transformers import MT5EncoderModel, AutoTokenizer >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = 'mt5' config: MT5Config _tied_weights_keys = ['encoder.embed_tokens.weight'] def __init__(self, config: MT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = config encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) self.post_init() self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0, 'block.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.encoder.deparallelize() self.encoder = self.encoder.to('cpu') self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). Example: ```python >>> from transformers import AutoTokenizer, MT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return encoder_outputs
@auto_docstring class MT5EncoderModel(MT5PreTrainedModel): ''' Examples: ```python >>> from transformers import MT5EncoderModel, AutoTokenizer >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```''' def __init__(self, config: MT5Config): pass @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): pass @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). Example: ```python >>> from transformers import AutoTokenizer, MT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
13
3
12
1
8
2
1
0.51
1
7
3
0
8
4
8
11
132
19
75
31
53
38
41
19
32
2
2
1
11
4084
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5ForConditionalGeneration
from torch import nn import copy from ...utils.model_parallel_utils import assert_device_map, get_device_map from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union import warnings from .configuration_mt5 import MT5Config from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...generation import GenerationMixin @auto_docstring(custom_intro='\n MT5 Model with a `language modeling` head on top.\n ') class MT5ForConditionalGeneration(MT5PreTrainedModel, GenerationMixin): """ Examples: ```python >>> from transformers import MT5ForConditionalGeneration, AutoTokenizer >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = 'mt5' config: MT5Config _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: MT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.post_init() self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to('cpu') self.decoder = self.decoder.to('cpu') self.lm_head = self.lm_head.to('cpu') self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, MT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you. 
```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) sequence_output = decoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) self.lm_head = self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: sequence_output = sequence_output * self.model_dim ** (-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return (loss,) + output if loss is not None else output return Seq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels)
@auto_docstring(custom_intro='\n MT5 Model with a `language modeling` head on top.\n ') class MT5ForConditionalGeneration(MT5PreTrainedModel, GenerationMixin): ''' Examples: ```python >>> from transformers import MT5ForConditionalGeneration, AutoTokenizer >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```''' def __init__(self, config: MT5Config): pass @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): pass @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, MT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you. ```''' pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass
13
2
21
2
15
4
3
0.35
2
10
4
0
12
7
12
15
305
45
193
59
157
67
114
37
101
20
2
2
37
4085
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5ForQuestionAnswering
from torch import nn import copy from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union import warnings from .configuration_mt5 import MT5Config from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring class MT5ForQuestionAnswering(MT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: MT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) self.num_labels = config.num_labels self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() self.model_parallel = False def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if start_positions is not None and end_positions is not None: use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = self._shift_right(input_ids) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = decoder_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = 
start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs return (total_loss,) + output if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class MT5ForQuestionAnswering(MT5PreTrainedModel): def __init__(self, config: MT5Config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. ''' pass
8
1
28
3
22
3
4
0.19
1
9
4
0
6
7
6
9
184
24
134
48
107
26
73
29
66
19
2
2
24
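The `MT5ForQuestionAnswering` record above splits a single 2-unit projection of the decoder output into start/end logits and averages two cross-entropy losses, clamping out-of-range positions to an ignored index. A minimal, self-contained sketch of that loss computation follows; tensor shapes and label values are made up for illustration, and the code mirrors the record's logic rather than calling the model.

```python
# Sketch of the span-extraction loss used in the MT5ForQuestionAnswering record above.
# Shapes and positions below are illustrative stand-ins for real decoder outputs/labels.
import torch
from torch import nn
from torch.nn import CrossEntropyLoss

batch_size, seq_len, hidden_size, num_labels = 2, 16, 8, 2
sequence_output = torch.randn(batch_size, seq_len, hidden_size)
start_positions = torch.tensor([3, 7])
end_positions = torch.tensor([5, 20])  # 20 is deliberately out of range

qa_outputs = nn.Linear(hidden_size, num_labels)  # projects to (start, end) logits
logits = qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()

# Positions outside the sequence are clamped to `ignored_index` and then skipped by the loss.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```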
4,086
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5ForSequenceClassification
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union from .configuration_mt5 import MT5Config from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring(custom_intro='\n MT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ') class MT5ForSequenceClassification(MT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: MT5Config): super().__init__(config) self.transformer = MT5Model(config) self.classification_head = MT5ClassificationHead(config) self.post_init() self.model_parallel = False @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = self._shift_right(input_ids) outputs = self.transformer(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of <eos> tokens.') batch_size, _, hidden_size = sequence_output.shape sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, 
encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n MT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ') class MT5ForSequenceClassification(MT5PreTrainedModel): def __init__(self, config: MT5Config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
59
5
49
5
9
0.11
1
10
4
0
2
3
2
5
126
12
103
35
81
11
48
17
45
17
2
3
18
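The `MT5ForSequenceClassification` record above pools the decoder's hidden state at the last `<eos>` token of each example to form the sentence representation, after checking that every example contains the same number of `<eos>` tokens. A small sketch of that pooling step, with an illustrative eos id and made-up tensors:

```python
# Sketch of the <eos>-pooling used in the MT5ForSequenceClassification record above.
# The eos id and all shapes are illustrative, not taken from a real checkpoint.
import torch

eos_token_id = 1
batch_size, seq_len, hidden_size = 2, 6, 4
input_ids = torch.tensor([[5, 8, 1, 9, 3, 1],
                          [7, 2, 4, 1, 6, 1]])          # two eos tokens per row
sequence_output = torch.randn(batch_size, seq_len, hidden_size)

eos_mask = input_ids.eq(eos_token_id)
# Every example must contain the same number of eos tokens for the reshape below to work.
assert len(torch.unique_consecutive(eos_mask.sum(1))) == 1
sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
print(sentence_representation.shape)  # torch.Size([2, 4])
```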
4,087
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5ForTokenClassification
from torch import nn from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union from .configuration_mt5 import MT5Config from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring class MT5ForTokenClassification(MT5PreTrainedModel): _tied_weights_keys = ['transformer.encoder.embed_tokens.weight'] def __init__(self, config: MT5Config): super().__init__(config) self.num_labels = config.num_labels self.transformer = MT5EncoderModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./t5#training). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits, outputs[2:-1]) return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class MT5ForTokenClassification(MT5PreTrainedModel): def __init__(self, config: MT5Config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./t5#training). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
5
1
29
4
22
3
3
0.17
1
6
3
0
2
4
2
5
65
9
48
25
33
8
23
14
20
5
2
1
6
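The `MT5ForTokenClassification` record above computes its loss by flattening the per-token logits to `(batch * seq_len, num_labels)` and the labels to `(batch * seq_len,)` before applying cross-entropy. A tiny sketch with illustrative sizes:

```python
# Sketch of the token-classification loss in the record above; sizes are illustrative.
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, num_labels = 2, 5, 3
logits = torch.randn(batch_size, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch_size, seq_len))

loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))
print(loss)
```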
4,088
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5LayerCrossAttention
from torch import nn from ...utils.deprecation import deprecate_kwarg from typing import Optional, Union class MT5LayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): super().__init__() self.EncDecAttention = MT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] return outputs
class MT5LayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): pass
4
0
17
0
17
1
1
0.03
1
4
2
0
2
3
2
12
36
1
35
22
20
1
12
10
9
1
1
0
2
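The `MT5LayerCrossAttention` record above (and the self-attention and feed-forward layer records that follow) all apply the same pre-norm residual wrapper: normalize, run the sub-layer, then add the dropped-out output back onto the untouched residual stream. The sketch below illustrates that wrapper only; `nn.MultiheadAttention` and `nn.LayerNorm` are stand-ins for `MT5Attention` and `MT5LayerNorm`, which are defined outside this record.

```python
import torch
from torch import nn

class PreNormCrossAttention(nn.Module):
    """Illustrative pre-norm residual wrapper; MT5 itself uses MT5Attention + MT5LayerNorm."""
    def __init__(self, d_model: int, num_heads: int, dropout: float = 0.1):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, num_heads, batch_first=True)
        self.layer_norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states, key_value_states):
        normed = self.layer_norm(hidden_states)                      # normalize first (pre-norm)
        attn_output, _ = self.attn(normed, key_value_states, key_value_states)
        return hidden_states + self.dropout(attn_output)             # residual uses the unnormalized input

layer = PreNormCrossAttention(d_model=8, num_heads=2)
decoder_states = torch.randn(1, 3, 8)
encoder_states = torch.randn(1, 5, 8)
print(layer(decoder_states, encoder_states).shape)  # torch.Size([1, 3, 8])
```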
4,089
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5LayerFF
from torch import nn from .configuration_mt5 import MT5Config class MT5LayerFF(nn.Module): def __init__(self, config: MT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = MT5DenseGatedActDense(config) else: self.DenseReluDense = MT5DenseActDense(config) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states
class MT5LayerFF(nn.Module): def __init__(self, config: MT5Config): pass def forward(self, hidden_states): pass
3
0
7
1
7
0
2
0
1
5
4
0
2
3
2
12
16
2
14
7
11
0
13
7
10
2
1
1
3
4,090
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5LayerNorm
from torch import nn import torch class MT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ Construct a layernorm module in the MT5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states
class MT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): ''' Construct a layernorm module in the MT5 style. No bias and no subtraction of mean. ''' pass def forward(self, hidden_states): pass
3
1
11
2
5
4
2
0.73
1
1
0
0
2
2
2
12
23
4
11
6
8
8
11
6
8
2
1
1
3
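As the `MT5LayerNorm` record above notes, this is an RMS-style norm: the activation is scaled by `1 / sqrt(mean(x**2) + eps)` with no mean subtraction and no bias, then multiplied by a learned weight. A quick standalone check of that behaviour, with made-up inputs:

```python
# Numerical sketch of the MT5LayerNorm computation described in the record above.
import torch

hidden_size, eps = 4, 1e-6
weight = torch.ones(hidden_size)
x = torch.randn(2, 3, hidden_size)

variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
y = weight * (x * torch.rsqrt(variance + eps))
# Unlike nn.LayerNorm, the per-position mean of y is generally not zero (no mean subtraction).
print(y.shape, y.mean(-1)[0, 0])
```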
4,091
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5LayerSelfAttention
from typing import Optional, Union from torch import nn from ...utils.deprecation import deprecate_kwarg class MT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.SelfAttention = MT5Attention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs
class MT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): pass
4
0
16
0
16
1
1
0.03
1
4
2
0
2
3
2
12
34
1
33
19
20
1
12
9
9
1
1
0
2
4,092
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5Model
from torch import nn import copy from ...utils.model_parallel_utils import assert_device_map, get_device_map from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union import warnings from .configuration_mt5 import MT5Config @auto_docstring class MT5Model(MT5PreTrainedModel): """ Examples: ```python >>> from transformers import MT5Model, AutoTokenizer >>> model = MT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="pt") >>> labels = tokenizer(text_target=summary, return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = 'mt5' config: MT5Config _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: MT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) self.post_init() self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to('cpu') self.decoder = self.decoder.to('cpu') self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, MT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5Model.from_pretrained("google/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for MT5Model. >>> # This is not needed for torch's MT5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if 
not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class MT5Model(MT5PreTrainedModel): ''' Examples: ```python >>> from transformers import MT5Model, AutoTokenizer >>> model = MT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="pt") >>> labels = tokenizer(text_target=summary, return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```''' def __init__(self, config: MT5Config): pass @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): pass @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, MT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> model = MT5Model.from_pretrained("google/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for MT5Model. >>> # This is not needed for torch's MT5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
13
3
20
2
15
3
3
0.34
1
9
4
0
9
5
9
12
225
30
145
45
113
50
74
24
64
14
2
2
24
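When a precomputed `encoder_outputs` tuple is passed to `MT5Model.forward` above with `return_dict=True`, it is rewrapped into a `BaseModelOutput` so the decoder pass and the returned `Seq2SeqModelOutput` can access it by attribute. A tiny sketch of that rewrapping step, using a made-up tensor in place of a real encoder result:

```python
import torch
from transformers.modeling_outputs import BaseModelOutput

# A precomputed encoder result passed as a plain tuple (last_hidden_state only).
encoder_outputs = (torch.randn(1, 5, 8),)

if not isinstance(encoder_outputs, BaseModelOutput):
    encoder_outputs = BaseModelOutput(
        last_hidden_state=encoder_outputs[0],
        hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
        attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
    )
print(encoder_outputs.last_hidden_state.shape)  # torch.Size([1, 5, 8])
```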
4,093
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5PreTrainedModel
from .configuration_mt5 import MT5Config from ...modeling_utils import PreTrainedModel from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch @auto_docstring class MT5PreTrainedModel(PreTrainedModel): config: MT5Config base_model_prefix = 'transformer' is_parallelizable = True supports_gradient_checkpointing = True _can_compile_fullgraph = True _no_split_modules = ['MT5Block'] _keep_in_fp32_modules = ['wo'] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = {'decoder_input_ids': input_ids, 'input_ids': input_ids, 'decoder_attention_mask': input_mask} return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, MT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (MT5Model, MT5ForConditionalGeneration, MT5EncoderModel, MT5ForQuestionAnswering)): module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings): module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, 'qa_outputs'): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, MT5ForTokenClassification): if hasattr(module, 'classifier'): module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) module.classifier.bias.data.zero_() elif isinstance(module, MT5ClassificationHead): module.dense.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.dense, 'bias') and module.dense.bias is not None: module.dense.bias.data.zero_() module.out_proj.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.out_proj, 'bias') and module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, MT5DenseActDense): module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi, 'bias') and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, MT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_0, 'bias') and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_1, 'bias') and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, MT5Attention): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5)) if module.has_relative_attention_bias: 
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError('self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id. See MT5 docs for more information.') if is_torch_fx_proxy(input_ids): shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
@auto_docstring class MT5PreTrainedModel(PreTrainedModel): @property def dummy_inputs(self): pass def _init_weights(self, module): '''Initialize the weights''' pass def _shift_right(self, input_ids): pass
6
1
32
1
27
4
8
0.18
1
11
10
7
3
0
3
3
115
8
92
25
87
17
74
24
70
19
1
2
24
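The `_shift_right` helper in the `MT5PreTrainedModel` record above turns labels into decoder inputs: everything moves one position to the right, the decoder start token (the pad id for MT5) is prepended, and any `-100` padding labels are replaced by the pad token. A standalone sketch of the non-FX branch, with illustrative token ids:

```python
# Sketch of the `_shift_right` logic from the record above; ids are illustrative.
import torch

decoder_start_token_id = 0   # MT5 uses pad_token_id as the decoder start token
pad_token_id = 0
labels = torch.tensor([[259, 332, 1, -100, -100]])

shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()
shifted[..., 0] = decoder_start_token_id
shifted.masked_fill_(shifted == -100, pad_token_id)   # -100 label padding becomes pad tokens
print(shifted)  # tensor([[  0, 259, 332,   1,   0]])
```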
4,094
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/modeling_mt5.py
transformers.models.mt5.modeling_mt5.MT5Stack
from torch import nn from ...utils.model_parallel_utils import assert_device_map, get_device_map from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union import warnings from ...modeling_attn_mask_utils import AttentionMaskConverter class MT5Stack(MT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList([MT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)]) self.final_layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.post_init() self.model_parallel = False self.device_map = None self.gradient_checkpointing = False @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`MT5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0, 'block.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = 'cpu' if 'cpu' in self.device_map else 'cuda:' + str(min(self.device_map.keys())) self.last_device = 'cuda:' + str(max(self.device_map.keys())) for k, v in self.device_map.items(): for layer in v: cuda_device = 'cuda:' + str(k) self.block[layer] = self.block[layer].to(cuda_device) self.embed_tokens = self.embed_tokens.to(self.first_device) self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.model_parallel = False self.device_map = None self.first_device = 'cpu' self.last_device = 'cpu' for i in range(len(self.block)): self.block[i] = self.block[i].to('cpu') self.embed_tokens = self.embed_tokens.to('cpu') self.final_layer_norm = self.final_layer_norm.to('cpu') torch.cuda.empty_cache() def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states 
is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError('You have to initialize the model with valid token embeddings') inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f'`use_cache` can only be set to `True` if {self} is used as a decoder') if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions) elif attention_mask is not None: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min else: causal_mask = None if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if 
self.model_parallel: torch.cuda.set_device(hidden_states.device) if causal_mask is not None: causal_mask = causal_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if cross_attn_layer_head_mask is not None: cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position) hidden_states = layer_outputs[0] position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and 'cuda:' + str(k) != self.last_device: hidden_states = hidden_states.to('cuda:' + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: 
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
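The static method at the end of the code above builds the additive 4D causal mask of shape `(batch_size, 1, query_length, key_value_length)` that the decoder layers consume. The snippet below is a minimal standalone sketch of that same logic (the helper name `build_causal_mask` and the toy inputs are illustrative and not part of this record); it shows how `torch.triu` and `cache_position` combine, and how the 2D padding mask is folded in:

```python
# Illustrative sketch: standalone version of the 4D causal-mask construction shown above.
import torch

def build_causal_mask(attention_mask, sequence_length, target_length, dtype, cache_position, batch_size):
    min_dtype = torch.finfo(dtype).min
    # Start from a fully masked (sequence_length, target_length) grid of the most negative value.
    mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
    if sequence_length != 1:
        # Standard causal masking: keep only positions strictly above the diagonal masked.
        mask = torch.triu(mask, diagonal=1)
    # Unmask every key slot that is not ahead of the query's absolute position in the cache.
    mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
    mask = mask[None, None, :, :].expand(batch_size, 1, -1, -1)
    if attention_mask is not None:
        # Fold the 2D padding mask into the causal mask: padded keys become min_dtype.
        mask = mask.clone()
        mask_length = attention_mask.shape[-1]
        padding = mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
        mask[:, :, :, :mask_length] = mask[:, :, :, :mask_length].masked_fill(padding == 0, min_dtype)
    return mask

# Example: 2 new tokens appended after 3 already-cached tokens, with an 8-slot cache.
mask = build_causal_mask(
    attention_mask=torch.ones(1, 8), sequence_length=2, target_length=8,
    dtype=torch.float32, cache_position=torch.tensor([3, 4]), batch_size=1,
)
print(mask.shape)  # torch.Size([1, 1, 2, 8])
```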
class MT5Stack(MT5PreTrainedModel): def __init__(self, config, embed_tokens=None): pass @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): pass @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): pass def set_input_embeddings(self, new_embeddings): pass def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
11
1
53
4
42
7
10
0.17
1
18
8
0
7
10
8
11
439
42
342
92
299
57
192
58
183
57
2
4
80
4,095
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/tokenization_mt5.py
transformers.models.mt5.tokenization_mt5.MT5Tokenizer
from ..t5 import T5Tokenizer class MT5Tokenizer(T5Tokenizer): pass
class MT5Tokenizer(T5Tokenizer): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
109
2
0
2
1
1
0
2
1
1
0
4
0
0
4,096
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mt5/tokenization_mt5_fast.py
transformers.models.mt5.tokenization_mt5_fast.MT5TokenizerFast
from ..t5 import T5TokenizerFast class MT5TokenizerFast(T5TokenizerFast): pass
class MT5TokenizerFast(T5TokenizerFast): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
96
2
0
2
1
1
0
2
1
1
0
4
0
0
4,097
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/configuration_musicgen.py
transformers.models.musicgen.configuration_musicgen.MusicgenConfig
from ..auto.configuration_auto import AutoConfig from ...configuration_utils import PretrainedConfig class MusicgenConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: kwargs (*optional*): Dictionary of keyword arguments. Notably: - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the text encoder config. - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Example: ```python >>> from transformers import ( ... MusicgenConfig, ... MusicgenDecoderConfig, ... T5Config, ... EncodecConfig, ... MusicgenForConditionalGeneration, ... ) >>> # Initializing text encoder, audio encoder, and decoder model configurations >>> text_encoder_config = T5Config() >>> audio_encoder_config = EncodecConfig() >>> decoder_config = MusicgenDecoderConfig() >>> configuration = MusicgenConfig.from_sub_models_config( ... text_encoder_config, audio_encoder_config, decoder_config ... ) >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration >>> model = MusicgenForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> config_text_encoder = model.config.text_encoder >>> config_audio_encoder = model.config.audio_encoder >>> config_decoder = model.config.decoder >>> # Saving the model, including its configuration >>> model.save_pretrained("musicgen-model") >>> # loading model and config from pretrained folder >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model") >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config) ```""" model_type = 'musicgen' sub_configs = {'text_encoder': AutoConfig, 'audio_encoder': AutoConfig, 'decoder': MusicgenDecoderConfig} has_no_defaults_at_init = True def __init__(self, **kwargs): super().__init__(**kwargs) if 'text_encoder' not in kwargs or 'audio_encoder' not in kwargs or 'decoder' not in kwargs: raise ValueError('Config has to be initialized with text_encoder, audio_encoder and decoder config') text_encoder_config = kwargs.pop('text_encoder') text_encoder_model_type = text_encoder_config.pop('model_type') audio_encoder_config = kwargs.pop('audio_encoder') audio_encoder_model_type = audio_encoder_config.pop('model_type') decoder_config = kwargs.pop('decoder') self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config) self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) self.decoder = MusicgenDecoderConfig(**decoder_config) self.is_encoder_decoder = True self.initializer_factor = self.decoder.initializer_factor @classmethod def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenDecoderConfig, **kwargs): """ Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder 
configurations. Returns: [`MusicgenConfig`]: An instance of a configuration object """ return cls(text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) @property def sampling_rate(self): return self.audio_encoder.sampling_rate
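As a brief usage sketch for the record above (assuming the `transformers` package exports `MusicgenConfig`, `MusicgenDecoderConfig`, `T5Config`, and `EncodecConfig`, as the docstring example does), the config is composed from three sub-configs and delegates `sampling_rate` to the audio encoder:

```python
# Illustrative sketch, not part of the dataset record above.
from transformers import T5Config, EncodecConfig, MusicgenDecoderConfig, MusicgenConfig

config = MusicgenConfig.from_sub_models_config(
    text_encoder_config=T5Config(),
    audio_encoder_config=EncodecConfig(),
    decoder_config=MusicgenDecoderConfig(),
)
print(config.sampling_rate)       # delegated to config.audio_encoder.sampling_rate
print(config.is_encoder_decoder)  # True, set in __init__

# Calling MusicgenConfig() without text_encoder/audio_encoder/decoder raises ValueError,
# per the check in __init__ shown above.
```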
class MusicgenConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: kwargs (*optional*): Dictionary of keyword arguments. Notably: - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the text encoder config. - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Example: ```python >>> from transformers import ( ... MusicgenConfig, ... MusicgenDecoderConfig, ... T5Config, ... EncodecConfig, ... MusicgenForConditionalGeneration, ... ) >>> # Initializing text encoder, audio encoder, and decoder model configurations >>> text_encoder_config = T5Config() >>> audio_encoder_config = EncodecConfig() >>> decoder_config = MusicgenDecoderConfig() >>> configuration = MusicgenConfig.from_sub_models_config( ... text_encoder_config, audio_encoder_config, decoder_config ... ) >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration >>> model = MusicgenForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> config_text_encoder = model.config.text_encoder >>> config_audio_encoder = model.config.audio_encoder >>> config_decoder = model.config.decoder >>> # Saving the model, including its configuration >>> model.save_pretrained("musicgen-model") >>> # loading model and config from pretrained folder >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model") >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config) ```''' def __init__(self, **kwargs): pass @classmethod def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenDecoderConfig, **kwargs): ''' Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder configurations. Returns: [`MusicgenConfig`]: An instance of a configuration object ''' pass @property def sampling_rate(self): pass
6
2
13
2
9
2
1
1.34
1
4
2
0
2
4
3
3
110
21
38
24
26
51
21
16
17
2
1
1
4
4,098
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/configuration_musicgen.py
transformers.models.musicgen.configuration_musicgen.MusicgenDecoderConfig
from ...configuration_utils import PretrainedConfig class MusicgenDecoderConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MusicgenDecoder`]. It is used to instantiate a MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MusicGen [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MusicgenDecoder`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of decoder layers. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer block. ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_factor (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(hidden_size). use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models). num_codebooks (`int`, *optional*, defaults to 4): The number of parallel codebooks forwarded to the model. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether input and output word embeddings should be tied. audio_channels (`int`, *optional*, defaults to 1): Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate audio stream for the left/right output channels. Mono models generate a single audio stream output. 
""" model_type = 'musicgen_decoder' base_config_key = 'decoder_config' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=2048, max_position_embeddings=2048, num_hidden_layers=24, ffn_dim=4096, num_attention_heads=16, layerdrop=0.0, use_cache=True, activation_function='gelu', hidden_size=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, initializer_factor=0.02, scale_embedding=False, num_codebooks=4, audio_channels=1, pad_token_id=2048, bos_token_id=2048, eos_token_id=None, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.initializer_factor = initializer_factor self.layerdrop = layerdrop self.use_cache = use_cache self.scale_embedding = scale_embedding self.num_codebooks = num_codebooks if audio_channels not in [1, 2]: raise ValueError(f'Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.') self.audio_channels = audio_channels super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class MusicgenDecoderConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MusicgenDecoder`]. It is used to instantiate a MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MusicGen [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MusicgenDecoder`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of decoder layers. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer block. ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_factor (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(hidden_size). use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models). num_codebooks (`int`, *optional*, defaults to 4): The number of parallel codebooks forwarded to the model. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether input and output word embeddings should be tied. audio_channels (`int`, *optional*, defaults to 1): Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate audio stream for the left/right output channels. Mono models generate a single audio stream output. 
''' def __init__(self, vocab_size=2048, max_position_embeddings=2048, num_hidden_layers=24, ffn_dim=4096, num_attention_heads=16, layerdrop=0.0, use_cache=True, activation_function='gelu', hidden_size=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, initializer_factor=0.02, scale_embedding=False, num_codebooks=4, audio_channels=1, pad_token_id=2048, bos_token_id=2048, eos_token_id=None, tie_word_embeddings=False, **kwargs): pass
2
1
51
2
49
1
2
0.92
1
2
0
0
1
16
1
1
108
7
53
44
28
49
24
21
22
2
1
1
2
4,099
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
transformers.models.musicgen.modeling_musicgen.MusicgenAttention
from ...utils.deprecation import deprecate_kwarg import torch.nn as nn import torch from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...processing_utils import Unpack from typing import TYPE_CHECKING, Any, Callable, Optional, Union from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...modeling_flash_attention_utils import FlashAttentionKwargs class MusicgenAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_causal: Optional[bool]=False, config: Optional[MusicgenConfig]=None, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True 
attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
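The forward pass above follows the standard project-reshape-attend-merge pattern. The snippet below is a self-contained sketch of that shape handling using PyTorch's built-in scaled dot-product attention rather than the module's pluggable attention backends; all sizes are arbitrary example values, and nothing here beyond the pattern itself is taken from the record:

```python
# Illustrative sketch, not the MusicgenAttention module above.
import torch
import torch.nn as nn
import torch.nn.functional as F

embed_dim, num_heads = 1024, 16
head_dim = embed_dim // num_heads
q_proj = nn.Linear(embed_dim, embed_dim)
k_proj = nn.Linear(embed_dim, embed_dim)
v_proj = nn.Linear(embed_dim, embed_dim)
out_proj = nn.Linear(embed_dim, embed_dim)

hidden_states = torch.randn(2, 10, embed_dim)  # (batch, time, channels)
bsz, tgt_len, _ = hidden_states.shape

# Project, split the channel dimension into (num_heads, head_dim), and move heads forward.
query = q_proj(hidden_states).view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
key = k_proj(hidden_states).view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
value = v_proj(hidden_states).view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)

# Scaled dot-product attention per head; the default 1/sqrt(head_dim) scaling matches
# the module's `self.scaling = head_dim ** -0.5`.
attn = F.scaled_dot_product_attention(query, key, value, is_causal=True)

# Merge heads back and apply the output projection, as in the last lines of forward().
attn = attn.transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
output = out_proj(attn)
print(output.shape)  # torch.Size([2, 10, 1024])
```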
class MusicgenAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_causal: Optional[bool]=False, config: Optional[MusicgenConfig]=None, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
4
2
50
7
35
8
5
0.24
1
7
1
2
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15