Dataset columns, with dtypes and value ranges (for string columns the range is the string length):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
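As a hedged sketch of how this schema could be consumed, the snippet below loads the dataset with the `datasets` library and filters on two of the metric columns. The repository id `your-org/python-class-metrics` is a placeholder, not the real dataset name, and the column names are taken from the table above.

```python
# Minimal sketch, assuming the columns in the schema table above.
# The dataset id is a placeholder; substitute the actual repository.
from datasets import load_dataset

ds = load_dataset("your-org/python-class-metrics", split="train")  # hypothetical id

row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["CountLineCode"], row["MaxCyclomatic"])

# Keep classes that have at least one docstring and a moderate amount of code.
subset = ds.filter(
    lambda r: r["total_doc_str"] > 0
    and r["CountLineCode"] is not None
    and r["CountLineCode"] <= 500
)
print(len(subset))
```

The sample records below (rows 4,100 through 4,103) follow the column order of the schema: the scalar identification fields, then the human_written_code and class_skeleton text, then the metric values.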

id: 4,100
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
class_name: transformers.models.musicgen.modeling_musicgen.MusicgenDecoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ModelOutput, Seq2SeqLMOutput from typing import TYPE_CHECKING, Any, Callable, Optional, Union import torch.nn as nn import torch import random from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from ...utils import auto_docstring, is_torch_flex_attn_available, logging import math from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache class MusicgenDecoder(MusicgenPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenDecoderLayer`] """ def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.max_target_positions = config.max_position_embeddings self.d_model = config.hidden_size self.num_codebooks = config.num_codebooks self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 embed_dim = config.vocab_size + 1 self.embed_tokens = nn.ModuleList([nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)]) self.embed_positions = MusicgenSinusoidalPositionalEmbedding(config.max_position_embeddings, config.hidden_size) self.layers = nn.ModuleList([MusicgenDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.layer_norm = nn.LayerNorm(config.hidden_size) self.attn_implementation = config._attn_implementation self.gradient_checkpointing = False self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. 
</Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input.shape input_shape = (bsz, seq_len) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1:] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. 
`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)]) attention_mask = self._update_causal_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds) positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions.to(inputs_embeds.device) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int): if self.config._attn_implementation == 'flash_attention_2': attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) elif attention_mask is None: attention_mask = make_flex_block_causal_mask(torch.ones(size=input_shape, device=inputs_embeds.device)) else: attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, 
past_key_values_length) return attention_mask def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): if encoder_hidden_states is not None and encoder_attention_mask is not None: if self.config._attn_implementation == 'flash_attention_2': encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == 'sdpa': encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) elif self.config._attn_implementation == 'flex_attention': if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False) else: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) return encoder_attention_mask
class MusicgenDecoder(MusicgenPreTrainedModel): ''' Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenDecoderLayer`] ''' def __init__(self, config: MusicgenDecoderConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. ''' pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int): pass def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass

total_program_units: 6
total_doc_str: 2
AvgCountLine: 51
AvgCountLineBlank: 6
AvgCountLineCode: 41
AvgCountLineComment: 4
AvgCyclomatic: 12
CommentToCodeRatio: 0.11
CountClassBase: 1
CountClassCoupled: 12
CountClassCoupledModified: 4
CountClassDerived: 0
CountDeclInstanceMethod: 4
CountDeclInstanceVariable: 12
CountDeclMethod: 4
CountDeclMethodAll: 5
CountLine: 212
CountLineBlank: 27
CountLineCode: 166
CountLineCodeDecl: 49
CountLineCodeExe: 146
CountLineComment: 19
CountStmt: 85
CountStmtDecl: 33
CountStmtExe: 80
MaxCyclomatic: 42
MaxInheritanceTree: 2
MaxNesting: 3
SumCyclomatic: 46
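One sanity check on these values: the stored CommentToCodeRatio is consistent with a simple quotient of the two line counts above. Whether that is the exact definition used by the metric extractor is an assumption; the arithmetic for this row is:

```python
# Assumption: CommentToCodeRatio ~ CountLineComment / CountLineCode.
count_line_comment = 19
count_line_code = 166
print(round(count_line_comment / count_line_code, 2))  # 0.11, matching the record above
```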

id: 4,101
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
class_name: transformers.models.musicgen.modeling_musicgen.MusicgenDecoderLayer
from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig from typing import TYPE_CHECKING, Any, Callable, Optional, Union from ...utils.deprecation import deprecate_kwarg import torch.nn as nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache class MusicgenDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MusicgenDecoderConfig, layer_idx=None): super().__init__() self.embed_dim = config.hidden_size self.self_attn = MusicgenAttention(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, is_causal=True, config=config, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = MusicgenAttention(self.embed_dim, config.num_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, config=config, layer_idx=layer_idx) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
class MusicgenDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MusicgenDecoderConfig, layer_idx=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass

total_program_units: 4
total_doc_str: 1
AvgCountLine: 59
AvgCountLineBlank: 6
AvgCountLineCode: 41
AvgCountLineComment: 13
AvgCyclomatic: 4
CommentToCodeRatio: 0.31
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 1
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 11
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 121
CountLineBlank: 12
CountLineCode: 83
CountLineCodeDecl: 32
CountLineCodeExe: 69
CountLineComment: 26
CountStmt: 44
CountStmtDecl: 21
CountStmtExe: 41
MaxCyclomatic: 6
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 7
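The class_skeleton field in these rows keeps the class declaration, method signatures, decorators, and docstrings while replacing method bodies with `pass`. A minimal sketch of producing a similar skeleton with Python's `ast` module is shown below; the dataset's actual extraction pipeline is not documented here, so treat this as an approximation.

```python
# Hedged sketch: derive a class skeleton by emptying function bodies,
# keeping signatures, decorators, and leading docstrings.
import ast

def skeletonize(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            new_body = []
            first = node.body[0] if node.body else None
            if (isinstance(first, ast.Expr)
                    and isinstance(first.value, ast.Constant)
                    and isinstance(first.value.value, str)):
                new_body.append(first)  # keep the docstring
            new_body.append(ast.Pass())
            node.body = new_body
    return ast.unparse(tree)  # Python 3.9+

print(skeletonize("class Foo:\n    def bar(self, x):\n        '''Add one.'''\n        return x + 1\n"))
```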

id: 4,102
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
class_name: transformers.models.musicgen.modeling_musicgen.MusicgenForCausalLM
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ModelOutput, Seq2SeqLMOutput import copy from torch.nn import CrossEntropyLoss from typing import TYPE_CHECKING, Any, Callable, Optional, Union import torch.nn as nn from ...generation import ClassifierFreeGuidanceLogitsProcessor, GenerationConfig, GenerationMixin, GenerationMode, LogitsProcessorList, StoppingCriteriaList from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring(custom_intro='\n The MusicGen decoder model with a language modelling head on top.\n ') class MusicgenForCausalLM(MusicgenPreTrainedModel, GenerationMixin): def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) self.model = MusicgenModel(config) self.num_codebooks = config.num_codebooks self.lm_heads = nn.ModuleList([nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)]) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_heads def set_output_embeddings(self, new_embeddings): self.lm_heads = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and (input_ids is None and inputs_embeds is None): input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.bos_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1) loss = None if labels is not None: logits = lm_logits[:, :, -labels.shape[1]:] loss_fct = CrossEntropyLoss() loss = torch.zeros([], device=self.device) labels = labels.masked_fill(labels == self.config.pad_token_id, -100) for codebook in range(self.config.num_codebooks): codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1]) codebook_labels = labels[..., codebook].contiguous().view(-1) loss += loss_fct(codebook_logits, codebook_labels) loss = loss / self.config.num_codebooks lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:]) if not return_dict: output = (lm_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=True, delay_pattern_mask=None, guidance_scale=None, **kwargs): if delay_pattern_mask is None: input_ids, delay_pattern_mask = self.build_delay_pattern_mask(input_ids, pad_token_id=self.generation_config.pad_token_id, max_length=self.generation_config.max_length) input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: input_ids = input_ids.repeat((2, 1)) if attention_mask is not None: attention_mask = attention_mask.repeat((2, 1)) if past_key_values is not None: input_ids = 
input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, 'head_mask': head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} def build_delay_pattern_mask(self, input_ids: torch.LongTensor, pad_token_id: int, max_length: Optional[int]=None): """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks, seq_len)`: - [P, -1, -1, -1, -1, P, P, P] - [P, P, -1, -1, -1, -1, P, P] - [P, P, P, -1, -1, -1, -1, P] - [P, P, P, P, -1, -1, -1, -1] where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [P, a, b, -1, -1, P, P, P] - [P, P, c, d, -1, -1, P, P] - [P, P, P, e, f, -1, -1, P] - [P, P, P, P, g, h, -1, -1] where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1 tokens in our prediction. """ input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input_ids.shape max_length = max_length if max_length is not None else self.generation_config.max_length input_ids_shifted = torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks if max_length < 2 * channel_codebooks - 1: return (input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1)) for codebook in range(channel_codebooks): if self.config.audio_channels == 1: input_ids_shifted[:, codebook, codebook:seq_len + codebook] = input_ids[:, codebook] else: input_ids_shifted[:, 2 * codebook, codebook:seq_len + codebook] = input_ids[:, 2 * codebook] input_ids_shifted[:, 2 * codebook + 1, codebook:seq_len + codebook] = input_ids[:, 2 * codebook + 1] delay_pattern = torch.triu(torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1) delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool)) if self.config.audio_channels == 2: delay_pattern = delay_pattern.repeat_interleave(2, dim=0) mask = ~delay_pattern.to(input_ids.device) input_ids = mask * input_ids_shifted + ~mask * pad_token_id first_codebook_ids = input_ids[:, 0, :] start_ids = (first_codebook_ids == -1).nonzero()[:, 1] if len(start_ids) > 0: first_start_id = min(start_ids) else: first_start_id = seq_len pattern_mask = input_ids.reshape(bsz * num_codebooks, -1) input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1) return (input_ids, pattern_mask) @staticmethod def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): """Apply a delay pattern mask to the decoder input ids, only preserving predictions where the mask is set to -1, and otherwise setting to the value detailed in the mask.""" seq_len = input_ids.shape[-1] decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len] input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask) return input_ids 
@torch.no_grad() def generate(self, inputs: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, logits_processor: Optional[LogitsProcessorList]=None, stopping_criteria: Optional[StoppingCriteriaList]=None, synced_gpus: Optional[bool]=None, streamer: Optional['BaseStreamer']=None, **kwargs): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed to avoid deadlocking with `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3). streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. 
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateDecoderOnlyOutput`], - [`~generation.GenerateBeamDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() requires_attention_mask = 'encoder_outputs' not in model_kwargs kwargs_has_attention_mask = model_kwargs.get('attention_mask', None) is not None input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs) batch_size = input_ids.shape[0] // self.num_codebooks self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device) model_kwargs['use_cache'] = generation_config.use_cache model_kwargs['guidance_scale'] = generation_config.guidance_scale if model_kwargs.get('attention_mask', None) is None and requires_attention_mask: model_kwargs['attention_mask'] = self._prepare_attention_mask_for_generation(input_ids, generation_config, model_kwargs) input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get('max_length') is None and generation_config.max_length is not None has_default_min_length = kwargs.get('min_length') is None and generation_config.min_length is not None generation_config = self._prepare_generated_length(generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=input_ids, input_ids_length=input_ids_length) self._validate_generated_length(generation_config, input_ids_length, has_default_max_length) max_cache_length = generation_config.max_length - 1 if input_ids_length.shape[1] != input_ids_length and model_input_name == 'inputs_embeds' and (not self.config.is_encoder_decoder): max_cache_length += input_ids_length.shape[1] self._prepare_cache_for_generation(generation_config, model_kwargs, generation_mode=None, batch_size=batch_size, max_cache_length=max_cache_length) input_ids, delay_pattern_mask = self.build_delay_pattern_mask(input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length) if streamer is not None: streamer.put(input_ids.cpu()) model_kwargs['delay_pattern_mask'] = delay_pattern_mask generation_mode = generation_config.get_generation_mode() if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None logits_processor = self._get_logits_processor(generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=input_ids, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device) stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, 
stopping_criteria=stopping_criteria) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): input_ids, model_kwargs = self._expand_inputs_for_generation(input_ids=input_ids, expand_size=generation_config.num_return_sequences, **model_kwargs) outputs = self._sample(input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs) else: raise ValueError('Got incompatible mode for generation, should be one of greedy or sampling. Ensure that beam search is de-activated by setting `num_beams=1`.') if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs['delay_pattern_mask']) output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(batch_size, self.num_codebooks, -1) if generation_config.return_dict_in_generate: outputs.sequences = output_ids return outputs else: return output_ids
class_skeleton: null

total_program_units: 17
total_doc_str: 4
AvgCountLine: 36
AvgCountLineBlank: 5
AvgCountLineCode: 21
AvgCountLineComment: 10
AvgCyclomatic: 3
CommentToCodeRatio: 0.47
CountClassBase: 2
CountClassCoupled: 11
CountClassCoupledModified: 5
CountClassDerived: 0
CountDeclInstanceMethod: 11
CountDeclInstanceVariable: 3
CountDeclMethod: 12
CountDeclMethodAll: 13
CountLine: 448
CountLineBlank: 71
CountLineCode: 257
CountLineCodeDecl: 91
CountLineCodeExe: 202
CountLineComment: 121
CountStmt: 130
CountStmtDecl: 49
CountStmtExe: 117
MaxCyclomatic: 10
MaxInheritanceTree: 2
MaxNesting: 2
SumCyclomatic: 38
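Row 4,102 above shows that class_skeleton can be null, so code that consumes the skeleton field needs a guard. A minimal sketch, reusing the hypothetical dataset id from the earlier example:

```python
from datasets import load_dataset

ds = load_dataset("your-org/python-class-metrics", split="train")  # hypothetical id, as above

# Drop records without a skeleton before using it as a prompt or target.
with_skeleton = ds.filter(lambda r: r["class_skeleton"] is not None)
print(f"{len(ds) - len(with_skeleton)} records have no class_skeleton")
```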

id: 4,103
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
class_name: transformers.models.musicgen.modeling_musicgen.MusicgenForConditionalGeneration
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ModelOutput, Seq2SeqLMOutput import inspect from ..auto.modeling_auto import AutoModel import copy from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from typing import TYPE_CHECKING, Any, Callable, Optional, Union from ..auto.configuration_auto import AutoConfig import torch.nn as nn from ...generation import ClassifierFreeGuidanceLogitsProcessor, GenerationConfig, GenerationMixin, GenerationMode, LogitsProcessorList, StoppingCriteriaList import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring(custom_intro='\n The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder,\n ') class MusicgenForConditionalGeneration(MusicgenPreTrainedModel, GenerationMixin): config: MusicgenConfig base_model_prefix = 'encoder_decoder' main_input_name = 'input_ids' supports_gradient_checkpointing = True def __init__(self, config: Optional[MusicgenConfig]=None, text_encoder: Optional[PreTrainedModel]=None, audio_encoder: Optional[PreTrainedModel]=None, decoder: Optional[MusicgenForCausalLM]=None): """ text_encoder (`PreTrainedModel`, *optional*): The text encoder model that encodes text into hidden states for conditioning. audio_encoder (`PreTrainedModel`, *optional*): The audio encoder model that encodes audio into hidden states for conditioning. decoder (`MusicgenForCausalLM`, *optional*): The decoder model that generates audio tokens based on conditioning signals. """ if config is None and (text_encoder is None or audio_encoder is None or decoder is None): raise ValueError('Either a configuration has to be provided, or all three of text encoder, audio encoder and MusicGen decoder.') if config is None: config = MusicgenConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config) elif not isinstance(config, self.config_class): raise ValueError(f'Config: {config} has to be of type {self.config_class}') if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.text_encoder.hidden_size: raise ValueError(f"If `cross_attention_hidden_size` is specified in the MusicGen decoder's configuration, it has to be equal to the text encoder's `hidden_size`. 
Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` and {config.text_encoder.hidden_size} for `config.text_encoder.hidden_size`.") super().__init__(config) if text_encoder is None: from ..auto.modeling_auto import AutoModelForTextEncoding text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder) if audio_encoder is None: from ..auto.modeling_auto import AutoModel audio_encoder = AutoModel.from_config(config.audio_encoder) if decoder is None: decoder = MusicgenForCausalLM._from_config(config.decoder) self.text_encoder = text_encoder self.audio_encoder = audio_encoder self.decoder = decoder if self.text_encoder.config.to_dict() != self.config.text_encoder.to_dict(): logger.warning(f'Config of the text_encoder: {self.text_encoder.__class__} is overwritten by shared text_encoder config: {self.config.text_encoder}') if self.audio_encoder.config.to_dict() != self.config.audio_encoder.to_dict(): logger.warning(f'Config of the audio_encoder: {self.audio_encoder.__class__} is overwritten by shared audio_encoder config: {self.config.audio_encoder}') if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning(f'Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}') self.config.text_encoder._attn_implementation = self.text_encoder.config._attn_implementation self.config.audio_encoder._attn_implementation = self.audio_encoder.config._attn_implementation self.config.decoder._attn_implementation = self.decoder.config._attn_implementation self.text_encoder.config = self.config.text_encoder self.audio_encoder.config = self.config.audio_encoder self.decoder.config = self.config.decoder if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size) if self.text_encoder.get_output_embeddings() is not None: raise ValueError(f'The encoder {self.text_encoder} should not have a LM Head. Please use a model without and LM Head') decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys()) if 'encoder_hidden_states' not in decoder_signature: raise ValueError('The selected decoder is not prepared for the encoder hidden states to be passed. 
Please see the following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350') self.tie_weights() def tie_weights(self): if self.config.tie_encoder_decoder: decoder_base_model_prefix = self.decoder.base_model_prefix tied_weights = self._tie_encoder_decoder_weights(self.text_encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix, 'text_encoder') self._dynamic_tied_weights_keys = tied_weights def get_audio_encoder(self): return self.audio_encoder def get_text_encoder(self): return self.text_encoder def get_encoder(self): return self.get_text_encoder() def get_input_embeddings(self): return self.text_encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) @classmethod def from_sub_models_pretrained(cls, text_encoder_pretrained_model_name_or_path: Optional[str]=None, audio_encoder_pretrained_model_name_or_path: Optional[str]=None, decoder_pretrained_model_name_or_path: Optional[str]=None, *model_args, **kwargs) -> PreTrainedModel: """ Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: text_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the text encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. audio_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the audio encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration parameter. - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. 
Example: ```python >>> from transformers import MusicgenForConditionalGeneration >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder >>> model = MusicgenForConditionalGeneration.from_sub_models_pretrained( ... text_encoder_pretrained_model_name_or_path="google-t5/t5-base", ... audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz", ... decoder_pretrained_model_name_or_path="facebook/musicgen-small", ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./musicgen-ft") >>> # load fine-tuned model >>> model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-ft") ```""" kwargs_text_encoder = {argument[len('text_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('text_encoder_')} kwargs_audio_encoder = {argument[len('audio_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('audio_encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')} for key in kwargs_text_encoder: del kwargs['text_encoder_' + key] for key in kwargs_audio_encoder: del kwargs['audio_encoder_' + key] for key in kwargs_decoder: del kwargs['decoder_' + key] text_encoder = kwargs_text_encoder.pop('model', None) if text_encoder is None: if text_encoder_pretrained_model_name_or_path is None: raise ValueError('If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_text_encoder: encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model from a decoder model. Cross-attention and causal mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_text_encoder['config'] = encoder_config text_encoder = AutoModel.from_pretrained(text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder) audio_encoder = kwargs_audio_encoder.pop('model', None) if audio_encoder is None: if audio_encoder_pretrained_model_name_or_path is None: raise ValueError('If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_audio_encoder: encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model from a decoder model. 
Cross-attention and causal mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_audio_encoder['config'] = encoder_config audio_encoder = AutoModel.from_pretrained(audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder) decoder = kwargs_decoder.pop('model', None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_decoder: decoder_config, kwargs_decoder = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True) if isinstance(decoder_config, MusicgenConfig): decoder_config = decoder_config.decoder if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.") decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder['config'] = decoder_config if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False: logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_sub_models_pretrained(...)`') decoder = MusicgenForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) config = MusicgenConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config, **kwargs) return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, input_values: Optional[torch.FloatTensor]=None, padding_mask: Optional[torch.BoolTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, Seq2SeqLMOutput]: """ padding_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. 
Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) <Tip warning={true}> The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `decoder_input_ids`. </Tip> decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> import torch >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> inputs = processor( ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... ) >>> pad_token_id = model.generation_config.pad_token_id >>> decoder_input_ids = ( ... torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long) ... * pad_token_id ... 
) >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits >>> logits.shape # (bsz * num_codebooks, tgt_len, vocab_size) torch.Size([8, 1, 2048]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_text_encoder = {argument[len('text_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('text_encoder_')} kwargs_audio_encoder = {argument[len('audio_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('audio_encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')} if encoder_outputs is None: encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_text_encoder) elif isinstance(encoder_outputs, tuple): encoder_outputs = BaseModelOutput(*encoder_outputs) encoder_hidden_states = encoder_outputs[0] if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if attention_mask is not None: encoder_hidden_states = encoder_hidden_states * attention_mask[..., None] if labels is not None and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.decoder_start_token_id) elif decoder_input_ids is None and decoder_inputs_embeds is None: audio_encoder_outputs = self.audio_encoder(input_values=input_values, padding_mask=padding_mask, **kwargs_audio_encoder) audio_codes = audio_encoder_outputs.audio_codes frames, bsz, codebooks, seq_len = audio_codes.shape if frames != 1: raise ValueError(f'Expected 1 frame in the audio code outputs, got {frames} frames. 
Ensure chunking is disabled by setting `chunk_length=None` in the audio encoder.') if self.config.decoder.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2: audio_codes = audio_codes.repeat_interleave(2, dim=2) decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, labels=labels, **kwargs_decoder) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput(loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_attention_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, decoder_delay_pattern_mask=None, guidance_scale=None, cache_position=None, **kwargs): if decoder_delay_pattern_mask is None: decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(decoder_input_ids, self.generation_config.pad_token_id, max_length=self.generation_config.max_length) decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: decoder_input_ids = decoder_input_ids.repeat((2, 1)) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat((2, 1)) if past_key_values is not None: if cache_position[-1] >= decoder_input_ids.shape[1]: decoder_input_ids = decoder_input_ids[:, -cache_position.shape[0]:] elif decoder_input_ids.shape[1] != cache_position.shape[0]: decoder_input_ids = decoder_input_ids[:, cache_position] else: decoder_input_ids = decoder_input_ids[:, -1:] return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def _prepare_decoder_input_ids_for_generation(self, batch_size: int, model_input_name: str, model_kwargs: dict[str, torch.Tensor], decoder_start_token_id: Optional[int]=None, bos_token_id: Optional[int]=None, device: Optional[torch.device]=None) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]: """Prepares `decoder_input_ids` for generation with encoder-decoder models""" if model_kwargs is not None and 'decoder_input_ids' in model_kwargs: decoder_input_ids = model_kwargs.pop('decoder_input_ids') elif 'input_ids' in model_kwargs and model_input_name != 'input_ids': decoder_input_ids = model_kwargs.pop('input_ids') else: decoder_input_ids = None decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is 
None: device = self.device decoder_input_ids_start = torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device) * decoder_start_token_id if decoder_input_ids is None: decoder_input_ids = decoder_input_ids_start elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item(): decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) if 'decoder_attention_mask' in model_kwargs: decoder_attention_mask = model_kwargs['decoder_attention_mask'] decoder_attention_mask = torch.cat((torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), dim=-1) model_kwargs['decoder_attention_mask'] = decoder_attention_mask return (decoder_input_ids, model_kwargs) def _prepare_text_encoder_kwargs_for_generation(self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str], generation_config: GenerationConfig) -> dict[str, Any]: encoder = self.get_text_encoder() if hasattr(encoder, '_hf_hook'): encoder._hf_hook.io_same_device = True irrelevant_prefix = ['decoder_', 'cross_attn', 'use_cache'] encoder_kwargs = {argument: value for argument, value in model_kwargs.items() if not any((argument.startswith(p) for p in irrelevant_prefix))} encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = 'kwargs' in encoder_signature or 'model_kwargs' in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = {argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature} encoder_kwargs['output_attentions'] = generation_config.output_attentions encoder_kwargs['output_hidden_states'] = generation_config.output_hidden_states guidance_scale = generation_config.guidance_scale model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name encoder_kwargs['return_dict'] = True encoder_kwargs[model_input_name] = inputs_tensor last_hidden_state = encoder(**encoder_kwargs).last_hidden_state if guidance_scale is not None and guidance_scale > 1: last_hidden_state = torch.concatenate([last_hidden_state, torch.zeros_like(last_hidden_state)], dim=0) if 'attention_mask' in model_kwargs: model_kwargs['attention_mask'] = torch.concatenate([model_kwargs['attention_mask'], torch.zeros_like(model_kwargs['attention_mask'])], dim=0) model_kwargs['encoder_outputs'] = BaseModelOutput(last_hidden_state=last_hidden_state) return model_kwargs def _prepare_audio_encoder_kwargs_for_generation(self, input_values, model_kwargs, model_input_name: Optional[str]=None): encoder = self.get_audio_encoder() if hasattr(encoder, '_hf_hook'): encoder._hf_hook.io_same_device = True irrelevant_prefix = ['decoder_', 'cross_attn', 'use_cache'] encoder_kwargs = {argument: value for argument, value in model_kwargs.items() if not any((argument.startswith(p) for p in irrelevant_prefix))} encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = 'kwargs' in encoder_signature or 'model_kwargs' in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = {argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature} model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name encoder_kwargs['return_dict'] = True if self.decoder.config.audio_channels == 1: encoder_kwargs[model_input_name] = input_values audio_encoder_outputs = encoder.encode(**encoder_kwargs) audio_codes = audio_encoder_outputs.audio_codes audio_scales = 
audio_encoder_outputs.audio_scales frames, bsz, codebooks, seq_len = audio_codes.shape else: if input_values.shape[1] != 2: raise ValueError(f'Expected stereo audio (2-channels) but example has {input_values.shape[1]} channel.') encoder_kwargs[model_input_name] = input_values[:, :1, :] audio_encoder_outputs_left = encoder.encode(**encoder_kwargs) audio_codes_left = audio_encoder_outputs_left.audio_codes audio_scales_left = audio_encoder_outputs_left.audio_scales encoder_kwargs[model_input_name] = input_values[:, 1:, :] audio_encoder_outputs_right = encoder.encode(**encoder_kwargs) audio_codes_right = audio_encoder_outputs_right.audio_codes audio_scales_right = audio_encoder_outputs_right.audio_scales frames, bsz, codebooks, seq_len = audio_codes_left.shape audio_codes = audio_codes_left.new_ones((frames, bsz, 2 * codebooks, seq_len)) audio_codes[:, :, ::2, :] = audio_codes_left audio_codes[:, :, 1::2, :] = audio_codes_right if audio_scales_left != [None] or audio_scales_right != [None]: audio_scales = torch.stack([audio_scales_left, audio_scales_right], dim=1) else: audio_scales = [None] * bsz if frames != 1: raise ValueError(f'Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is disabled by setting `chunk_length=None` in the audio encoder.') decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len) model_kwargs['decoder_input_ids'] = decoder_input_ids model_kwargs['audio_scales'] = audio_scales return model_kwargs def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id) def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError('Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))') def freeze_audio_encoder(self): """ Freeze the audio encoder weights. """ for param in self.audio_encoder.parameters(): param.requires_grad = False self.audio_encoder._requires_grad = False def freeze_text_encoder(self): """ Freeze the text encoder weights. 
""" for param in self.text_encoder.parameters(): param.requires_grad = False self.text_encoder._requires_grad = False def _maybe_initialize_input_ids_for_generation(self, inputs: Optional[torch.Tensor]=None, bos_token_id: Optional[int]=None, model_kwargs: Optional[dict[str, torch.Tensor]]=None) -> torch.LongTensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs encoder_outputs = model_kwargs.get('encoder_outputs') if encoder_outputs is not None: shape = encoder_outputs[0].size()[:-1] return torch.ones(shape, dtype=torch.long, device=self.device) * -100 if bos_token_id is None: raise ValueError('`bos_token_id` has to be defined when no `input_ids` are provided.') batch_size = 1 for value in model_kwargs.values(): if isinstance(value, torch.Tensor): batch_size = value.shape[0] break return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id def _get_decoder_start_token_id(self, decoder_start_token_id: Optional[Union[int, list[int]]]=None, bos_token_id: Optional[int]=None) -> int: decoder_start_token_id = decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError('`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation.') @torch.no_grad() def generate(self, inputs: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, logits_processor: Optional[LogitsProcessorList]=None, stopping_criteria: Optional[StoppingCriteriaList]=None, synced_gpus: Optional[bool]=None, streamer: Optional['BaseStreamer']=None, **kwargs): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. 
logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed to avoid deadlocking with `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3). streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateDecoderOnlyOutput`], - [`~generation.GenerateBeamDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) if model_kwargs.get('encoder_outputs') is not None and type(model_kwargs['encoder_outputs']) is tuple: model_kwargs['encoder_outputs'] = BaseModelOutput(last_hidden_state=model_kwargs['encoder_outputs'][0]) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() requires_attention_mask = 'encoder_outputs' not in model_kwargs kwargs_has_attention_mask = model_kwargs.get('attention_mask', None) is not None inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device) model_kwargs['use_cache'] = generation_config.use_cache model_kwargs['guidance_scale'] = generation_config.guidance_scale if model_kwargs.get('attention_mask', None) is None and requires_attention_mask: model_kwargs['attention_mask'] = self._prepare_attention_mask_for_generation(inputs_tensor, generation_config, model_kwargs) if 'encoder_outputs' not in model_kwargs: model_kwargs = 
self._prepare_text_encoder_kwargs_for_generation(inputs_tensor, model_kwargs, model_input_name, generation_config) if 'decoder_input_ids' not in model_kwargs and 'input_values' in model_kwargs: model_kwargs = self._prepare_audio_encoder_kwargs_for_generation(model_kwargs['input_values'], model_kwargs) input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(batch_size=batch_size, model_input_name=model_input_name, model_kwargs=model_kwargs, decoder_start_token_id=generation_config._decoder_start_token_tensor, bos_token_id=generation_config._bos_token_tensor, device=inputs_tensor.device) input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get('max_length') is None and generation_config.max_length is not None has_default_min_length = kwargs.get('min_length') is None and generation_config.min_length is not None generation_config = self._prepare_generated_length(generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=inputs_tensor, input_ids_length=input_ids_length) input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length) model_kwargs['decoder_delay_pattern_mask'] = decoder_delay_pattern_mask if streamer is not None: streamer.put(input_ids.cpu()) generation_mode = generation_config.get_generation_mode() if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None logits_processor = self._get_logits_processor(generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=inputs_tensor, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device) stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=stopping_criteria) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): input_ids, model_kwargs = self._expand_inputs_for_generation(input_ids=input_ids, expand_size=generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs) outputs = self._sample(input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs) else: raise ValueError('Got incompatible mode for generation, should be one of greedy or sampling. Ensure that beam search is de-activated by setting `num_beams=1`.') if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs['decoder_delay_pattern_mask']) output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(batch_size, self.decoder.num_codebooks, -1) output_ids = output_ids[None, ...] 
audio_scales = model_kwargs.get('audio_scales') if audio_scales is None: audio_scales = [None] * batch_size if self.decoder.config.audio_channels == 1: output_values = self.audio_encoder.decode(output_ids, audio_scales=audio_scales).audio_values else: codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales) output_values_left = codec_outputs_left.audio_values codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales) output_values_right = codec_outputs_right.audio_values output_values = torch.cat([output_values_left, output_values_right], dim=1) if generation_config.return_dict_in_generate: outputs.sequences = output_values return outputs else: return output_values def get_unconditional_inputs(self, num_samples=1): """ Helper function to get null inputs for unconditional generation, enabling the model to be used without the feature extractor or tokenizer. Args: num_samples (int, *optional*): Number of audio samples to unconditionally generate. max_new_tokens (int, *optional*): Number of tokens to generate for each sample. More tokens means longer audio samples, at the expense of longer inference (since more audio tokens need to be generated per sample). Example: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> # get the unconditional (or 'null') inputs for the model >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```""" last_hidden_state = torch.zeros((num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype) attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long) return MusicgenUnconditionalInput(encoder_outputs=(last_hidden_state,), attention_mask=attention_mask, guidance_scale=1.0)
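The `generate()` path above staggers the codebook streams with `build_delay_pattern_mask` / `apply_delay_pattern_mask` before sampling and removes the pattern again afterwards. As a rough illustration of the underlying idea (a hedged sketch, not the library's implementation), the snippet below offsets codebook `k` by `k` steps and fills the gaps with the pad token; `toy_delay_pattern` is a hypothetical helper written only for this example.

```python
# Illustrative sketch of a codebook delay pattern: codebook k is shifted
# right by k steps, with pad_token_id filling the uncovered positions.
# `toy_delay_pattern` is NOT part of transformers; it is a toy stand-in.
import torch

def toy_delay_pattern(codes: torch.Tensor, pad_token_id: int, max_length: int) -> torch.Tensor:
    num_codebooks, seq_len = codes.shape
    out = torch.full((num_codebooks, max_length), pad_token_id, dtype=codes.dtype)
    for k in range(num_codebooks):
        usable = max(0, min(seq_len, max_length - k))
        out[k, k : k + usable] = codes[k, :usable]
    return out

codes = torch.arange(8).reshape(4, 2)  # 4 codebooks, 2 timesteps
print(toy_delay_pattern(codes, pad_token_id=2048, max_length=6))
```

This staggering is also why, after sampling, the tail of `generate()` filters out `pad_token_id` entries before reshaping the sequences to `(batch_size, num_codebooks, -1)`.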
null
27
9
44
6
28
9
5
0.32
2
21
10
0
22
6
24
24
1,086
177
690
196
581
221
349
114
322
18
1
3
115
4,104
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
transformers.models.musicgen.modeling_musicgen.MusicgenModel
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ModelOutput, Seq2SeqLMOutput from typing import TYPE_CHECKING, Any, Callable, Optional, Union import torch.nn as nn from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring class MusicgenModel(MusicgenPreTrainedModel): def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) self.decoder = MusicgenDecoder(config) self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict decoder_outputs = self.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)
@auto_docstring class MusicgenModel(MusicgenPreTrainedModel): def __init__(self, config: MusicgenDecoderConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. ''' pass
7
1
12
1
11
0
2
0.04
1
6
3
0
5
1
5
6
65
7
56
23
35
2
20
8
14
6
2
1
10
4,105
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
transformers.models.musicgen.modeling_musicgen.MusicgenPreTrainedModel
import torch.nn as nn from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import auto_docstring, is_torch_flex_attn_available, logging @auto_docstring class MusicgenPreTrainedModel(PreTrainedModel): config: MusicgenDecoderConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['MusicgenDecoderLayer', 'MusicgenAttention'] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): std = self.config.initializer_factor if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
@auto_docstring class MusicgenPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
10
0
10
0
5
0.24
1
0
0
3
1
0
1
1
23
2
17
9
15
4
16
9
14
5
1
2
5
4,106
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
transformers.models.musicgen.modeling_musicgen.MusicgenSinusoidalPositionalEmbedding
import torch.nn as nn import math import torch class MusicgenSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int): super().__init__() self.embedding_dim = embedding_dim self.make_weights(num_positions, embedding_dim) def make_weights(self, num_embeddings: int, embedding_dim: int): emb_weights = self.get_embedding(num_embeddings, embedding_dim) if hasattr(self, 'weights'): emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer('weights', emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0): bsz, codebooks, seq_len = input_ids.size() position_ids = (torch.arange(seq_len) + past_key_values_length).to(input_ids.device) if seq_len > self.weights.size(0): self.make_weights(seq_len, self.embedding_dim) return self.weights.index_select(0, position_ids.view(-1)).detach()
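Reading the `get_embedding` staticmethod off directly: with `h = embedding_dim // 2`, the buffer row for position `p` places all cosines first and then all sines (appending a zero column when `embedding_dim` is odd), i.e.

```latex
% Frequencies and embedding row built by get_embedding, with h = embedding_dim // 2
\omega_i = 10000^{-i/(h-1)}, \qquad
E_p = \bigl[\cos(p\,\omega_0), \ldots, \cos(p\,\omega_{h-1}),\;
            \sin(p\,\omega_0), \ldots, \sin(p\,\omega_{h-1})\bigr]
```

This block (non-interleaved) layout is what the docstring calls the tensor2tensor-style implementation, as opposed to the interleaved sin/cos description in Section 3.5 of "Attention Is All You Need"; note that here the cosine half comes first.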
class MusicgenSinusoidalPositionalEmbedding(nn.Module): '''This module produces sinusoidal positional embeddings of any length.''' def __init__(self, num_positions: int, embedding_dim: int): pass def make_weights(self, num_embeddings: int, embedding_dim: int): pass @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int): ''' Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". ''' pass @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0): pass
7
2
9
0
7
2
2
0.31
1
3
0
0
3
2
4
14
43
5
29
14
22
9
27
12
22
2
1
1
7
4,107
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/modeling_musicgen.py
transformers.models.musicgen.modeling_musicgen.MusicgenUnconditionalInput
import torch.nn as nn from dataclasses import dataclass import torch from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ModelOutput, Seq2SeqLMOutput from typing import TYPE_CHECKING, Any, Callable, Optional, Union @dataclass @auto_docstring class MusicgenUnconditionalInput(ModelOutput): """ encoder_outputs (`tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the text encoder model. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**. guidance_scale (`float`, *optional*): Guidance scale for classifier free guidance, setting the balance between the conditional logits (predicted from the prompts) and the unconditional logits (predicted without prompts). """ encoder_outputs: Optional[tuple[torch.FloatTensor]] = None attention_mask: Optional[torch.LongTensor] = None guidance_scale: Optional[float] = None
@dataclass @auto_docstring class MusicgenUnconditionalInput(ModelOutput): ''' encoder_outputs (`tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the text encoder model. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**. guidance_scale (`float`, *optional*): Guidance scale for classifier free guidance, setting the balance between the conditional logits (predicted from the prompts) and the unconditional logits (predicted without prompts). ''' pass
3
1
0
0
0
0
0
2.75
1
0
0
0
0
0
0
0
16
1
4
4
3
11
4
4
3
0
1
0
0
4,108
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen/processing_musicgen.py
transformers.models.musicgen.processing_musicgen.MusicgenProcessor
import numpy as np from typing import Any from ...processing_utils import ProcessorMixin from ...utils import to_numpy class MusicgenProcessor(ProcessorMixin): """ Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor class. [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information. Args: feature_extractor (`EncodecFeatureExtractor`): An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input. tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = 'EncodecFeatureExtractor' tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast') def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, *args, **kwargs): """ Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text` argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if len(args) > 0: kwargs['audio'] = args[0] return super().__call__(*args, **kwargs) def batch_decode(self, *args, **kwargs): """ This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ audio_values = kwargs.pop('audio', None) padding_mask = kwargs.pop('padding_mask', None) if len(args) > 0: audio_values = args[0] args = args[1:] if audio_values is not None: return self._decode_audio(audio_values, padding_mask=padding_mask) else: return self.tokenizer.batch_decode(*args, **kwargs) def _decode_audio(self, audio_values, padding_mask: Any=None) -> list[np.ndarray]: """ This method strips any padding from the audio values to return a list of numpy audio arrays. """ audio_values = to_numpy(audio_values) bsz, channels, seq_len = audio_values.shape if padding_mask is None: return list(audio_values) padding_mask = to_numpy(padding_mask) difference = seq_len - padding_mask.shape[-1] padding_value = 1 - self.feature_extractor.padding_value padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value) audio_values = audio_values.tolist() for i in range(bsz): sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value] audio_values[i] = sliced_audio.reshape(channels, -1) return audio_values
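To make the trimming in `_decode_audio` concrete, here is a minimal numpy sketch of the idea under simplifying assumptions (the mask already matches the audio length, `padding_value == 0`, and the mask uses 1 for real samples); the actual method additionally right-pads the mask out to the generated sequence length before slicing.

```python
# Minimal sketch of the padding-stripping idea in _decode_audio: keep only
# the time steps the padding mask flags as real audio, per batch item.
import numpy as np

audio = np.arange(10, dtype=np.float32).reshape(1, 2, 5)  # (bsz, channels, seq_len)
padding_mask = np.array([[1, 1, 1, 0, 0]])                # last two steps are padding

trimmed = []
for sample, mask in zip(audio, padding_mask):
    trimmed.append(sample[:, mask != 0])  # boolean column-select across channels
print(trimmed[0].shape)  # (2, 3)
```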
class MusicgenProcessor(ProcessorMixin): ''' Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor class. [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information. Args: feature_extractor (`EncodecFeatureExtractor`): An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input. tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. ''' def __init__(self, feature_extractor, tokenizer): pass def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): pass def __call__(self, *args, **kwargs): ''' Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text` argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. ''' pass def batch_decode(self, *args, **kwargs): ''' This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. ''' pass def _decode_audio(self, audio_values, padding_mask: Any=None) -> list[np.ndarray]: ''' This method strips any padding from the audio values to return a list of numpy audio arrays. ''' pass
6
4
15
2
10
3
3
0.51
1
4
0
0
6
2
6
23
115
23
61
23
54
31
56
23
49
9
2
2
18
4,109
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py
transformers.models.musicgen_melody.configuration_musicgen_melody.MusicgenMelodyConfig
from ..auto.configuration_auto import AutoConfig from ...configuration_utils import PretrainedConfig class MusicgenMelodyConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MusicgenMelodyModel`]. It is used to instantiate a Musicgen Melody model according to the specified arguments, defining the text encoder, audio encoder and Musicgen Melody decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use. chroma_length (`int`, *optional*, defaults to 235): Maximum chroma duration if audio is used to condition the model. Corresponds to the maximum duration used during training. kwargs (*optional*): Dictionary of keyword arguments. Notably: - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the text encoder config. - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Example: ```python >>> from transformers import ( ... MusicgenMelodyConfig, ... MusicgenMelodyDecoderConfig, ... T5Config, ... EncodecConfig, ... MusicgenMelodyForConditionalGeneration, ... ) >>> # Initializing text encoder, audio encoder, and decoder model configurations >>> text_encoder_config = T5Config() >>> audio_encoder_config = EncodecConfig() >>> decoder_config = MusicgenMelodyDecoderConfig() >>> configuration = MusicgenMelodyConfig.from_sub_models_config( ... text_encoder_config, audio_encoder_config, decoder_config ... 
) >>> # Initializing a MusicgenMelodyForConditionalGeneration (with random weights) from the facebook/musicgen-melody style configuration >>> model = MusicgenMelodyForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> config_text_encoder = model.config.text_encoder >>> config_audio_encoder = model.config.audio_encoder >>> config_decoder = model.config.decoder >>> # Saving the model, including its configuration >>> model.save_pretrained("musicgen_melody-model") >>> # loading model and config from pretrained folder >>> musicgen_melody_config = MusicgenMelodyConfig.from_pretrained("musicgen_melody-model") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("musicgen_melody-model", config=musicgen_melody_config) ```""" model_type = 'musicgen_melody' sub_configs = {'text_encoder': AutoConfig, 'audio_encoder': AutoConfig, 'decoder': MusicgenMelodyDecoderConfig} has_no_defaults_at_init = True def __init__(self, num_chroma=12, chroma_length=235, **kwargs): super().__init__(**kwargs) if 'text_encoder' not in kwargs or 'audio_encoder' not in kwargs or 'decoder' not in kwargs: raise ValueError('Config has to be initialized with text_encoder, audio_encoder and decoder config') text_encoder_config = kwargs.pop('text_encoder') text_encoder_model_type = text_encoder_config.pop('model_type') audio_encoder_config = kwargs.pop('audio_encoder') audio_encoder_model_type = audio_encoder_config.pop('model_type') decoder_config = kwargs.pop('decoder') self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config) self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) self.decoder = MusicgenMelodyDecoderConfig(**decoder_config) self.is_encoder_decoder = False self.num_chroma = num_chroma self.chroma_length = chroma_length @classmethod def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenMelodyDecoderConfig, **kwargs): """ Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder configurations. Returns: [`MusicgenMelodyConfig`]: An instance of a configuration object """ return cls(text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) @property def sampling_rate(self): return self.audio_encoder.sampling_rate
class MusicgenMelodyConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MusicgenMelodyModel`]. It is used to instantiate a Musicgen Melody model according to the specified arguments, defining the text encoder, audio encoder and Musicgen Melody decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use. chroma_length (`int`, *optional*, defaults to 235): Maximum chroma duration if audio is used to condition the model. Corresponds to the maximum duration used during training. kwargs (*optional*): Dictionary of keyword arguments. Notably: - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the text encoder config. - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Example: ```python >>> from transformers import ( ... MusicgenMelodyConfig, ... MusicgenMelodyDecoderConfig, ... T5Config, ... EncodecConfig, ... MusicgenMelodyForConditionalGeneration, ... ) >>> # Initializing text encoder, audio encoder, and decoder model configurations >>> text_encoder_config = T5Config() >>> audio_encoder_config = EncodecConfig() >>> decoder_config = MusicgenMelodyDecoderConfig() >>> configuration = MusicgenMelodyConfig.from_sub_models_config( ... text_encoder_config, audio_encoder_config, decoder_config ... ) >>> # Initializing a MusicgenMelodyForConditionalGeneration (with random weights) from the facebook/musicgen-melody style configuration >>> model = MusicgenMelodyForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> config_text_encoder = model.config.text_encoder >>> config_audio_encoder = model.config.audio_encoder >>> config_decoder = model.config.decoder >>> # Saving the model, including its configuration >>> model.save_pretrained("musicgen_melody-model") >>> # loading model and config from pretrained folder >>> musicgen_melody_config = MusicgenMelodyConfig.from_pretrained("musicgen_melody-model") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("musicgen_melody-model", config=musicgen_melody_config) ```''' def __init__(self, num_chroma=12, chroma_length=235, **kwargs): pass @classmethod def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenMelodyDecoderConfig, **kwargs): ''' Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder configurations. Returns: [`MusicgenMelodyConfig`]: An instance of a configuration object ''' pass @property def sampling_rate(self): pass
6
2
16
2
12
2
1
1.22
1
4
2
0
2
6
3
3
122
22
45
31
28
55
23
18
19
2
1
1
4
4,110
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py
transformers.models.musicgen_melody.configuration_musicgen_melody.MusicgenMelodyDecoderConfig
from ...configuration_utils import PretrainedConfig class MusicgenMelodyDecoderConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MusicgenMelodyDecoder`]. It is used to instantiate a Musicgen Melody decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the MusicgenMelodyDecoder model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`MusicgenMelodyDecoder`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). num_hidden_layers (`int`, *optional*, defaults to 24): Number of decoder layers. ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer block. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models) activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. initializer_factor (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(hidden_size). num_codebooks (`int`, *optional*, defaults to 4): The number of parallel codebooks forwarded to the model. audio_channels (`int`, *optional*, defaults to 1): Number of audio channels used by the model (either mono or stereo). Stereo models generate a separate audio stream for the left/right output channels. Mono models generate a single audio stream output. pad_token_id (`int`, *optional*, defaults to 2048): The id of the *padding* token. bos_token_id (`int`, *optional*, defaults to 2048): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings with the text encoder. 
""" model_type = 'musicgen_melody_decoder' base_config_key = 'decoder_config' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=2048, max_position_embeddings=2048, num_hidden_layers=24, ffn_dim=4096, num_attention_heads=16, layerdrop=0.0, use_cache=True, activation_function='gelu', hidden_size=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, initializer_factor=0.02, scale_embedding=False, num_codebooks=4, audio_channels=1, pad_token_id=2048, bos_token_id=2048, eos_token_id=None, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.initializer_factor = initializer_factor self.layerdrop = layerdrop self.use_cache = use_cache self.scale_embedding = scale_embedding self.num_codebooks = num_codebooks if audio_channels not in [1, 2]: raise ValueError(f'Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.') self.audio_channels = audio_channels super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class MusicgenMelodyDecoderConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MusicgenMelodyDecoder`]. It is used to instantiate a Musicgen Melody decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the MusicgenMelodyDecoder model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`MusicgenMelodyDecoder`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). num_hidden_layers (`int`, *optional*, defaults to 24): Number of decoder layers. ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer block. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models) activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. initializer_factor (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(hidden_size). num_codebooks (`int`, *optional*, defaults to 4): The number of parallel codebooks forwarded to the model. audio_channels (`int`, *optional*, defaults to 1): Number of audio channels used by the model (either mono or stereo). Stereo models generate a separate audio stream for the left/right output channels. Mono models generate a single audio stream output. pad_token_id (`int`, *optional*, defaults to 2048): The id of the *padding* token. bos_token_id (`int`, *optional*, defaults to 2048): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings with the text encoder. 
''' def __init__(self, vocab_size=2048, max_position_embeddings=2048, num_hidden_layers=24, ffn_dim=4096, num_attention_heads=16, layerdrop=0.0, use_cache=True, activation_function='gelu', hidden_size=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, initializer_factor=0.02, scale_embedding=False, num_codebooks=4, audio_channels=1, pad_token_id=2048, bos_token_id=2048, eos_token_id=None, tie_word_embeddings=False, **kwargs): pass
2
1
51
2
49
1
2
0.96
1
2
0
0
1
16
1
1
110
7
53
44
28
51
24
21
22
2
1
1
2
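The configuration record above documents every constructor argument of MusicgenMelodyDecoderConfig. As a quick illustration of that API, the following minimal sketch (assuming a transformers release that exports MusicgenMelodyDecoderConfig, as current versions do) instantiates the documented defaults and exercises the mono/stereo validation:

from transformers import MusicgenMelodyDecoderConfig

# Defaults documented above: 2048-token codebook vocabulary, 4 codebooks,
# hidden_size 1024, pad/bos token ids equal to 2048.
config = MusicgenMelodyDecoderConfig()
print(config.vocab_size, config.num_codebooks, config.pad_token_id)  # 2048 4 2048

# Stereo checkpoints typically double the codebook count and set audio_channels=2.
stereo = MusicgenMelodyDecoderConfig(num_codebooks=8, audio_channels=2)

# Any other channel count is rejected by the ValueError raised in __init__.
try:
    MusicgenMelodyDecoderConfig(audio_channels=3)
except ValueError as err:
    print(err)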
4,111
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py
transformers.models.musicgen_melody.feature_extraction_musicgen_melody.MusicgenMelodyFeatureExtractor
from ...utils.import_utils import requires from ...feature_extraction_sequence_utils import SequenceFeatureExtractor import copy from ...feature_extraction_utils import BatchFeature from ...audio_utils import chroma_filter_bank from ...utils import TensorType, is_torch_available, is_torchaudio_available, logging import numpy as np from typing import Any, Optional, Union @requires(backends=('torchaudio',)) class MusicgenMelodyFeatureExtractor(SequenceFeatureExtractor): """ Constructs a MusicgenMelody feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts chroma features from audio processed by [Demucs](https://github.com/adefossez/demucs/tree/main) or directly from raw audio waveform. Args: feature_size (`int`, *optional*, defaults to 12): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 32000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). hop_length (`int`, *optional*, defaults to 4096): Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. chunk_length (`int`, *optional*, defaults to 30): The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio sequences. n_fft (`int`, *optional*, defaults to 16384): Size of the Fourier transform. num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. return_attention_mask (`bool`, *optional*, defaults to `False`): Whether to return the attention mask. Can be overwritten when calling the feature extractor. [What are attention masks?](../glossary#attention-mask) <Tip> For Whisper models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> stem_indices (`list[int]`, *optional*, defaults to `[3, 2]`): Stem channels to extract if demucs outputs are passed. """ model_input_names = ['input_features'] def __init__(self, feature_size=12, sampling_rate=32000, hop_length=4096, chunk_length=30, n_fft=16384, num_chroma=12, padding_value=0.0, return_attention_mask=False, stem_indices=[3, 2], **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs) self.n_fft = n_fft self.hop_length = hop_length self.chunk_length = chunk_length self.n_samples = chunk_length * sampling_rate self.sampling_rate = sampling_rate self.chroma_filters = torch.from_numpy(chroma_filter_bank(sampling_rate=sampling_rate, num_frequency_bins=n_fft, tuning=0, num_chroma=num_chroma)).float() self.spectrogram = torchaudio.transforms.Spectrogram(n_fft=n_fft, win_length=n_fft, hop_length=hop_length, power=2, center=True, pad=0, normalized=True) self.stem_indices = stem_indices def _torch_extract_fbank_features(self, waveform: torch.Tensor) -> torch.Tensor: """ Compute the chroma spectrogram of the provided audio using the torchaudio spectrogram implementation and the librosa chroma features. 
""" wav_length = waveform.shape[-1] if wav_length < self.n_fft: pad = self.n_fft - wav_length rest = 0 if pad % 2 == 0 else 1 waveform = torch.nn.functional.pad(waveform, (pad // 2, pad // 2 + rest), 'constant', 0) spec = self.spectrogram(waveform).squeeze(1) raw_chroma = torch.einsum('cf, ...ft->...ct', self.chroma_filters, spec) norm_chroma = torch.nn.functional.normalize(raw_chroma, p=float('inf'), dim=-2, eps=1e-06) norm_chroma = norm_chroma.transpose(1, 2) idx = norm_chroma.argmax(-1, keepdim=True) norm_chroma[:] = 0 norm_chroma.scatter_(dim=-1, index=idx, value=1) return norm_chroma def _extract_stem_indices(self, audio, sampling_rate=None): """ Extracts stems from the output of the [Demucs](https://github.com/adefossez/demucs/tree/main) audio separation model, then converts to mono-channel and resample to the feature extractor sampling rate. Args: audio (`torch.Tensor` of shape `(batch_size, num_stems, channel_size, audio_length)`): The output of the Demucs model to be processed. sampling_rate (`int`, *optional*): Demucs sampling rate. If not specified, defaults to `44000`. """ sampling_rate = 44000 if sampling_rate is None else sampling_rate wav = audio[:, torch.tensor(self.stem_indices)] wav = wav.sum(1) wav = wav.mean(dim=1, keepdim=True) if sampling_rate != self.sampling_rate: wav = torchaudio.functional.resample(wav, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24) wav = wav.squeeze(1) return wav def __call__(self, audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], truncation: bool=True, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=None, padding: Optional[str]=True, max_length: Optional[int]=None, sampling_rate: Optional[int]=None, **kwargs) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: audio (`torch.Tensor`, `np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[torch.Tensor]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a torch tensor, a numpy array, a list of float values, a list of numpy arrays, a list of torch tensors, or a list of list of float values. If `audio` is the output of Demucs, it has to be a torch tensor of shape `(batch_size, num_stems, channel_size, audio_length)`. Otherwise, it must be mono or stereo channel audio. truncation (`bool`, *optional*, default to `True`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*, defaults to None): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For Musicgen Melody models, audio `attention_mask` is not necessary. 
</Tip> padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. Note that if `audio` is the output of Demucs, `sampling_rate` must be the sampling rate at which Demucs operates. """ if sampling_rate is None: logger.warning_once(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.') if isinstance(audio, torch.Tensor) and len(audio.shape) == 4: logger.warning_once('`audio` is a 4-dimensional torch tensor and has thus been recognized as the output of `Demucs`. If this is not the case, make sure to read Musicgen Melody docstrings and to correct `audio` to get the right behaviour.Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody') audio = self._extract_stem_indices(audio, sampling_rate=sampling_rate) elif sampling_rate is not None and sampling_rate != self.sampling_rate: audio = torchaudio.functional.resample(audio, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24) is_batched = isinstance(audio, (np.ndarray, torch.Tensor)) and len(audio.shape) > 1 is_batched = is_batched or (isinstance(audio, (list, tuple)) and isinstance(audio[0], (torch.Tensor, np.ndarray, tuple, list))) if is_batched and (not isinstance(audio[0], torch.Tensor)): audio = [torch.tensor(speech, dtype=torch.float32).unsqueeze(-1) for speech in audio] elif is_batched: audio = [speech.unsqueeze(-1) for speech in audio] elif not is_batched and (not isinstance(audio, torch.Tensor)): audio = torch.tensor(audio, dtype=torch.float32).unsqueeze(-1) if isinstance(audio[0], torch.Tensor) and audio[0].dtype is torch.float64: audio = [speech.to(torch.float32) for speech in audio] if not is_batched: audio = [audio] if len(audio[0].shape) == 3: logger.warning_once('`audio` has been detected as a batch of stereo signals. Will be convert to mono signals. 
If this is an undesired behaviour, make sure to read Musicgen Melody docstrings and to correct `audio` to get the right behaviour.Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody') audio = [stereo.mean(dim=0) for stereo in audio] batched_speech = BatchFeature({'input_features': audio}) padded_inputs = self.pad(batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_tensors='pt') input_features = self._torch_extract_fbank_features(padded_inputs['input_features'].squeeze(-1)) padded_inputs['input_features'] = input_features if return_attention_mask: padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, ::self.hop_length] if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs def to_dict(self) -> dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) output['feature_extractor_type'] = self.__class__.__name__ if 'mel_filters' in output: del output['mel_filters'] if 'window' in output: del output['window'] if 'chroma_filters' in output: del output['chroma_filters'] if 'spectrogram' in output: del output['spectrogram'] return output
@requires(backends=('torchaudio',)) class MusicgenMelodyFeatureExtractor(SequenceFeatureExtractor): ''' Constructs a MusicgenMelody feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts chroma features from audio processed by [Demucs](https://github.com/adefossez/demucs/tree/main) or directly from raw audio waveform. Args: feature_size (`int`, *optional*, defaults to 12): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 32000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). hop_length (`int`, *optional*, defaults to 4096): Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. chunk_length (`int`, *optional*, defaults to 30): The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio sequences. n_fft (`int`, *optional*, defaults to 16384): Size of the Fourier transform. num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. return_attention_mask (`bool`, *optional*, defaults to `False`): Whether to return the attention mask. Can be overwritten when calling the feature extractor. [What are attention masks?](../glossary#attention-mask) <Tip> For Whisper models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> stem_indices (`list[int]`, *optional*, defaults to `[3, 2]`): Stem channels to extract if demucs outputs are passed. ''' def __init__(self, feature_size=12, sampling_rate=32000, hop_length=4096, chunk_length=30, n_fft=16384, num_chroma=12, padding_value=0.0, return_attention_mask=False, stem_indices=[3, 2], **kwargs): pass def _torch_extract_fbank_features(self, waveform: torch.Tensor) -> torch.Tensor: ''' Compute the chroma spectrogram of the provided audio using the torchaudio spectrogram implementation and the librosa chroma features. ''' pass def _extract_stem_indices(self, audio, sampling_rate=None): ''' Extracts stems from the output of the [Demucs](https://github.com/adefossez/demucs/tree/main) audio separation model, then converts to mono-channel and resample to the feature extractor sampling rate. Args: audio (`torch.Tensor` of shape `(batch_size, num_stems, channel_size, audio_length)`): The output of the Demucs model to be processed. sampling_rate (`int`, *optional*): Demucs sampling rate. If not specified, defaults to `44000`. ''' pass def __call__(self, audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], truncation: bool=True, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=None, padding: Optional[str]=True, max_length: Optional[int]=None, sampling_rate: Optional[int]=None, **kwargs) -> BatchFeature: ''' Main method to featurize and prepare for the model one or several sequence(s). Args: audio (`torch.Tensor`, `np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[torch.Tensor]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a torch tensor, a numpy array, a list of float values, a list of numpy arrays, a list of torch tensors, or a list of list of float values. 
If `audio` is the output of Demucs, it has to be a torch tensor of shape `(batch_size, num_stems, channel_size, audio_length)`. Otherwise, it must be mono or stereo channel audio. truncation (`bool`, *optional*, default to `True`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*, defaults to None): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For Musicgen Melody models, audio `attention_mask` is not necessary. </Tip> padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. Note that if `audio` is the output of Demucs, `sampling_rate` must be the sampling rate at which Demucs operates. ''' pass def to_dict(self) -> dict[str, Any]: ''' Serializes this instance to a Python dictionary. Returns: `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. ''' pass
7
5
49
7
27
15
5
0.77
1
10
1
0
5
8
5
22
293
48
139
51
110
107
76
28
70
13
3
1
25
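The feature-extractor record above describes a __call__ API that turns raw audio (or Demucs stems) into chroma input_features. A short usage sketch, assuming torchaudio is installed and the facebook/musicgen-melody checkpoint referenced elsewhere in these records is reachable:

import numpy as np
from transformers import MusicgenMelodyFeatureExtractor

feature_extractor = MusicgenMelodyFeatureExtractor.from_pretrained("facebook/musicgen-melody")

# One second of mono audio at the extractor's native 32 kHz rate; passing
# sampling_rate explicitly avoids the silent-resampling pitfall warned about above.
waveform = np.zeros(32000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=32000, return_tensors="pt")

# Chroma features of shape (batch, frames, num_chroma); with the defaults the
# waveform is padded to chunk_length * sampling_rate samples before the STFT.
print(inputs["input_features"].shape)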
4,112
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyAttention
from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig import torch.nn as nn from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack import torch from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils.deprecation import deprecate_kwarg class MusicgenMelodyAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_causal: Optional[bool]=False, config: Optional[MusicgenMelodyConfig]=None, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): 
past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
class MusicgenMelodyAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_causal: Optional[bool]=False, config: Optional[MusicgenMelodyConfig]=None, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
4
2
50
7
35
8
5
0.24
1
7
1
2
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
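The attention record above splits embed_dim across num_heads and scales queries by head_dim ** -0.5. The sketch below only re-checks that bookkeeping with the hidden size and head count used elsewhere in these records (1024 and 16); it does not import the transformers module itself:

embed_dim, num_heads = 1024, 16
head_dim = embed_dim // num_heads            # 64; __init__ raises ValueError if this does not divide evenly
assert head_dim * num_heads == embed_dim
scaling = head_dim ** -0.5                   # 0.125, applied to the query projection

# The q_proj output of shape (bsz, tgt_len, embed_dim) is viewed as
# (bsz, tgt_len, num_heads, head_dim) and transposed to a per-head layout.
bsz, tgt_len = 2, 10
per_head_shape = (bsz, num_heads, tgt_len, head_dim)
print(per_head_shape)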
4,113
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyDecoder
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from ...utils import auto_docstring, is_torch_flex_attn_available, logging import torch.nn as nn import torch import random from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa import math from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache class MusicgenMelodyDecoder(MusicgenMelodyPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenMelodyDecoderLayer`] """ def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.max_target_positions = config.max_position_embeddings self.d_model = config.hidden_size self.num_codebooks = config.num_codebooks self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 embed_dim = config.vocab_size + 1 self.embed_tokens = nn.ModuleList([nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)]) self.embed_positions = MusicgenMelodySinusoidalPositionalEmbedding(config.max_position_embeddings, config.hidden_size) self.layers = nn.ModuleList([MusicgenMelodyDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.layer_norm = nn.LayerNorm(config.hidden_size) self.attn_implementation = config._attn_implementation self.gradient_checkpointing = False self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPast]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input.shape input_shape = (bsz, seq_len) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1:] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. 
`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)]) if encoder_hidden_states is not None: if encoder_attention_mask is not None and attention_mask is None: attention_mask = torch.ones(inputs_embeds.shape[:2], device=inputs_embeds.device) if attention_mask is not None: if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=attention_mask.device) attention_mask = torch.cat([encoder_attention_mask, attention_mask], dim=1) inputs_embeds = torch.cat([encoder_hidden_states, inputs_embeds], dim=1) input_shape = inputs_embeds.size()[:-1] attention_mask = self._update_causal_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) positions = self.embed_positions(inputs_embeds, past_key_values_length) hidden_states = inputs_embeds + positions.to(inputs_embeds.device) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The `head_mask` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int): if self.config._attn_implementation == 'flash_attention_2': attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) elif attention_mask is None: attention_mask = make_flex_block_causal_mask(torch.ones(size=input_shape, device=inputs_embeds.device)) else: attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) return attention_mask def _update_cross_attn_mask(self, encoder_hidden_states: 
Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass
class MusicgenMelodyDecoder(MusicgenMelodyPreTrainedModel): ''' Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenMelodyDecoderLayer`] ''' def __init__(self, config: MusicgenMelodyDecoderConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPast]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) ''' pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int): pass def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass
6
2
45
7
36
3
10
0.12
1
11
4
0
4
12
4
5
191
30
144
46
125
17
85
31
80
37
2
3
41
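The decoder record above reshapes input_ids from (batch_size * num_codebooks, seq_len) to (batch, num_codebooks, seq_len) and sums one embedding table per codebook. A stand-alone sketch of just that step, using randomly initialized tables rather than a real checkpoint:

import torch
import torch.nn as nn

num_codebooks, hidden_size, vocab_size = 4, 1024, 2048
embed_tokens = nn.ModuleList(
    [nn.Embedding(vocab_size + 1, hidden_size) for _ in range(num_codebooks)]
)

bsz, seq_len = 2, 6
input_ids = torch.randint(0, vocab_size, (bsz * num_codebooks, seq_len))

# (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len), as in forward()
codes = input_ids.reshape(-1, num_codebooks, seq_len)

# One embedding table per codebook; the per-codebook embeddings are summed
# before the sinusoidal positional embeddings are added.
inputs_embeds = sum(embed_tokens[k](codes[:, k]) for k in range(num_codebooks))
print(inputs_embeds.shape)  # torch.Size([2, 6, 1024])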
4,114
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyDecoderLayer
from ...modeling_layers import GradientCheckpointingLayer from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from ...utils.deprecation import deprecate_kwarg from ...activations import ACT2FN import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache import torch.nn as nn class MusicgenMelodyDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MusicgenMelodyDecoderConfig, layer_idx=None): super().__init__() self.embed_dim = config.hidden_size self.self_attn = MusicgenMelodyAttention(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, is_causal=True, config=config, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states return (hidden_states, self_attn_weights)
class MusicgenMelodyDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MusicgenMelodyDecoderConfig, layer_idx=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
4
1
39
5
27
8
3
0.28
1
4
1
0
2
9
2
12
79
10
54
24
43
15
32
16
29
4
1
1
5
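The decoder-layer record above applies a pre-norm residual pattern twice: layer norm, self-attention, residual add, then layer norm, fc1, activation, fc2, residual add. The feed-forward half of that pattern is sketched below (dropout and attention omitted; gelu assumed, matching the default activation_function documented in the config record):

import torch
import torch.nn as nn

hidden_size, ffn_dim = 1024, 4096
final_layer_norm = nn.LayerNorm(hidden_size)
fc1 = nn.Linear(hidden_size, ffn_dim, bias=False)
fc2 = nn.Linear(ffn_dim, hidden_size, bias=False)

def ffn_block(hidden_states: torch.Tensor) -> torch.Tensor:
    # residual -> layer norm -> fc1 -> gelu -> fc2 -> add the residual back
    residual = hidden_states
    hidden_states = final_layer_norm(hidden_states)
    hidden_states = fc2(nn.functional.gelu(fc1(hidden_states)))
    return residual + hidden_states

print(ffn_block(torch.randn(2, 6, hidden_size)).shape)  # torch.Size([2, 6, 1024])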
4,115
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyForCausalLM
import copy from torch.nn import CrossEntropyLoss from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from ...utils import auto_docstring, is_torch_flex_attn_available, logging import torch.nn as nn from ...generation import ClassifierFreeGuidanceLogitsProcessor, GenerationConfig, GenerationMixin, GenerationMode, LogitsProcessorList, StoppingCriteriaList import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring(custom_intro='\n The Musicgen Melody decoder model with a language modelling head on top.\n ') class MusicgenMelodyForCausalLM(MusicgenMelodyPreTrainedModel, GenerationMixin): def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) self.model = MusicgenMelodyModel(config) self.num_codebooks = config.num_codebooks self.lm_heads = nn.ModuleList([nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)]) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_heads def set_output_embeddings(self, new_embeddings): self.lm_heads = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, MusicgenMelodyOutputWithPast]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and (input_ids is None and inputs_embeds is None): input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.bos_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1) loss = None if labels is not None: logits = lm_logits[:, :, -labels.shape[1]:] loss_fct = CrossEntropyLoss() loss = torch.zeros([], device=self.device) labels = labels.masked_fill(labels == self.config.pad_token_id, -100) for codebook in range(self.config.num_codebooks): codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1]) codebook_labels = labels[..., codebook].contiguous().view(-1) loss += loss_fct(codebook_logits, codebook_labels) loss = loss / self.config.num_codebooks lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:]) if not return_dict: output = (lm_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return MusicgenMelodyOutputWithPast(loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, past_key_values=None, use_cache=True, delay_pattern_mask=None, guidance_scale=None, **kwargs): if delay_pattern_mask is None: input_ids, delay_pattern_mask = self.build_delay_pattern_mask(input_ids, pad_token_id=self.generation_config.pad_token_id, max_length=self.generation_config.max_length) input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: input_ids = input_ids.repeat((2, 1)) if attention_mask is not None: attention_mask = attention_mask.repeat((2, 1)) if encoder_hidden_states is not None: encoder_hidden_states = torch.concatenate([encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0) if encoder_attention_mask is not None: encoder_attention_mask = torch.concatenate([encoder_attention_mask, torch.zeros_like(encoder_attention_mask)], dim=0) if past_key_values is not None: input_ids = input_ids[:, -1:] encoder_hidden_states = None return {'input_ids': input_ids, 'attention_mask': attention_mask, 'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask, 'head_mask': head_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} def build_delay_pattern_mask(self, input_ids: torch.LongTensor, pad_token_id: int, max_length: Optional[int]=None): """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks, seq_len)`: - [P, -1, -1, -1, -1, P, P, P] - [P, P, -1, -1, -1, -1, P, P] - [P, P, P, -1, -1, -1, -1, P] - [P, P, P, P, -1, -1, -1, -1] where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [P, a, b, -1, -1, P, P, P] - [P, P, c, d, -1, -1, P, P] - [P, P, P, e, f, -1, -1, P] - [P, P, P, P, g, h, -1, -1] where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1 tokens in our prediction. """ input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input_ids.shape max_length = max_length if max_length is not None else self.generation_config.max_length input_ids_shifted = torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks if max_length < 2 * channel_codebooks - 1: return (input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1)) for codebook in range(channel_codebooks): if self.config.audio_channels == 1: input_ids_shifted[:, codebook, codebook:seq_len + codebook] = input_ids[:, codebook] else: input_ids_shifted[:, 2 * codebook, codebook:seq_len + codebook] = input_ids[:, 2 * codebook] input_ids_shifted[:, 2 * codebook + 1, codebook:seq_len + codebook] = input_ids[:, 2 * codebook + 1] delay_pattern = torch.triu(torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1) delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool)) if self.config.audio_channels == 2: delay_pattern = delay_pattern.repeat_interleave(2, dim=0) mask = ~delay_pattern.to(input_ids.device) input_ids = mask * input_ids_shifted + ~mask * pad_token_id first_codebook_ids = input_ids[:, 0, :] start_ids = (first_codebook_ids == -1).nonzero()[:, 1] if len(start_ids) > 0: first_start_id = min(start_ids) else: first_start_id = seq_len pattern_mask = input_ids.reshape(bsz * num_codebooks, -1) input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1) return (input_ids, pattern_mask) @staticmethod def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): """Apply a delay pattern mask to the decoder input ids, only preserving predictions where the mask is set to -1, and otherwise setting to the value detailed in the mask.""" seq_len = input_ids.shape[-1] decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len] input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask) return input_ids @torch.no_grad() def generate(self, inputs: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, logits_processor: 
Optional[LogitsProcessorList]=None, stopping_criteria: Optional[StoppingCriteriaList]=None, synced_gpus: Optional[bool]=None, streamer: Optional['BaseStreamer']=None, **kwargs): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed to avoid deadlocking with `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3). streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. 
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateDecoderOnlyOutput`], - [`~generation.GenerateBeamDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() requires_attention_mask = 'encoder_outputs' not in model_kwargs kwargs_has_attention_mask = model_kwargs.get('attention_mask', None) is not None input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs) batch_size = input_ids.shape[0] // self.num_codebooks self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device) model_kwargs['use_cache'] = generation_config.use_cache model_kwargs['guidance_scale'] = generation_config.guidance_scale if model_kwargs.get('attention_mask', None) is None and requires_attention_mask: model_kwargs['attention_mask'] = self._prepare_attention_mask_for_generation(input_ids, generation_config, model_kwargs) input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get('max_length') is None and generation_config.max_length is not None has_default_min_length = kwargs.get('min_length') is None and generation_config.min_length is not None generation_config = self._prepare_generated_length(generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=input_ids, input_ids_length=input_ids_length) input_ids, delay_pattern_mask = self.build_delay_pattern_mask(input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length) if streamer is not None: streamer.put(input_ids.cpu()) model_kwargs['delay_pattern_mask'] = delay_pattern_mask generation_mode = generation_config.get_generation_mode() if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None logits_processor = self._get_logits_processor(generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=input_ids, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device) stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=stopping_criteria) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): input_ids, model_kwargs = self._expand_inputs_for_generation(input_ids=input_ids, expand_size=generation_config.num_return_sequences, **model_kwargs) outputs = self._sample(input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs) else: 
raise ValueError('Got incompatible mode for generation, should be one of greedy or sampling. Ensure that beam search is de-activated by setting `num_beams=1`.') if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs['delay_pattern_mask']) output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(batch_size, self.num_codebooks, -1) if generation_config.return_dict_in_generate: outputs.sequences = output_ids return outputs else: return output_ids
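The tail of the `generate()` above (mask with the delay pattern, drop the pad sentinel, regroup per codebook) can be pictured with a small, self-contained sketch. Everything below — shapes, the pad id, the random tokens — is made up for illustration and is not the library's implementation.

```python
import torch

# Toy post-processing sketch: MusicGen-style decoders stack the codebooks along the
# batch axis, so sampled ids arrive as (batch_size * num_codebooks, seq_len).
batch_size, num_codebooks, pad_token_id, seq_len = 2, 4, 2048, 6
output_ids = torch.randint(0, 2048, (batch_size * num_codebooks, seq_len))
output_ids[:, 0] = pad_token_id  # pretend the first step of every row is still the pad sentinel

# drop every pad occurrence, then regroup as (batch, codebooks, frames),
# mirroring the reshape performed at the end of generate() above
audio_codes = output_ids[output_ids != pad_token_id].reshape(batch_size, num_codebooks, -1)
print(audio_codes.shape)  # torch.Size([2, 4, 5])
```

The reshape only works because every row loses the same number of pad tokens, which is what the delay pattern mask is designed to guarantee in the real model.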
null
17
4
37
5
21
10
3
0.48
2
11
5
0
11
3
12
13
458
74
260
88
208
125
135
49
122
10
2
2
40
4,116
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyForConditionalGeneration
from ...generation import ClassifierFreeGuidanceLogitsProcessor, GenerationConfig, GenerationMixin, GenerationMode, LogitsProcessorList, StoppingCriteriaList from ..auto.modeling_auto import AutoModel, AutoModelForTextEncoding import torch from ..auto.configuration_auto import AutoConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel import math from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache import inspect import copy from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from ...utils import auto_docstring, is_torch_flex_attn_available, logging import torch.nn as nn @auto_docstring class MusicgenMelodyForConditionalGeneration(PreTrainedModel, GenerationMixin): config: MusicgenMelodyConfig main_input_name = 'input_ids' supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def __init__(self, config: MusicgenMelodyConfig=None, text_encoder: Optional[PreTrainedModel]=None, audio_encoder: Optional[PreTrainedModel]=None, decoder: Optional[MusicgenMelodyForCausalLM]=None): """ text_encoder (`PreTrainedModel`, *optional*): The text encoder model that encodes text into hidden states for conditioning. audio_encoder (`PreTrainedModel`, *optional*): The audio encoder model that encodes audio into hidden states for conditioning. decoder (`MusicgenMelodyForCausalLM`, *optional*): The decoder model that generates audio tokens based on conditioning signals. """ if config is None and None in (text_encoder, audio_encoder, decoder): raise ValueError('Either a configuration has to be provided, or all three of text encoder, audio encoder and Musicgen Melody decoder.') if config is None: config = MusicgenMelodyConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config) elif not isinstance(config, self.config_class): raise ValueError(f'Config: {config} has to be of type {self.config_class}') super().__init__(config) if text_encoder is None: text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder) if audio_encoder is None: audio_encoder = AutoModel.from_config(config.audio_encoder) if decoder is None: decoder = MusicgenMelodyForCausalLM._from_config(config.decoder) self.text_encoder = text_encoder self.audio_encoder = audio_encoder self.decoder = decoder self.config.text_encoder._attn_implementation = self.text_encoder.config._attn_implementation self.config.audio_encoder._attn_implementation = self.audio_encoder.config._attn_implementation self.config.decoder._attn_implementation = self.decoder.config._attn_implementation self.text_encoder.config = self.config.text_encoder self.audio_encoder.config = self.config.audio_encoder self.decoder.config = self.config.decoder if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size: self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size) if self.config.num_chroma != self.decoder.config.hidden_size: self.audio_enc_to_dec_proj = nn.Linear(self.config.num_chroma, self.decoder.config.hidden_size) if self.text_encoder.get_output_embeddings() is not None: raise ValueError(f'The encoder {self.text_encoder} should not have a LM Head. 
Please use a model without and LM Head') self.post_init() def _init_weights(self, module): std = self.decoder.config.initializer_factor if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() def tie_weights(self): if self.config.tie_encoder_decoder: decoder_base_model_prefix = self.decoder.base_model_prefix tied_weights = self._tie_encoder_decoder_weights(self.text_encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix, 'text_encoder') self._dynamic_tied_weights_keys = tied_weights def get_text_encoder(self): return self.text_encoder def get_encoder(self): return self.get_text_encoder() def get_input_embeddings(self): return self.text_encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) @classmethod def from_sub_models_pretrained(cls, text_encoder_pretrained_model_name_or_path: Optional[str]=None, audio_encoder_pretrained_model_name_or_path: Optional[str]=None, decoder_pretrained_model_name_or_path: Optional[str]=None, *model_args, **kwargs) -> PreTrainedModel: """ Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: text_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the text encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. audio_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the audio encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration parameter. - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. 
Example: ```python >>> from transformers import MusicgenMelodyForConditionalGeneration >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder >>> model = MusicgenMelodyForConditionalGeneration.from_sub_models_pretrained( ... text_encoder_pretrained_model_name_or_path="google-t5/t5-base", ... audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz", ... decoder_pretrained_model_name_or_path="facebook/musicgen-melody", ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./musicgen-ft") >>> # load fine-tuned model >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("./musicgen-ft") ```""" kwargs_text_encoder = {argument[len('text_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('text_encoder_')} kwargs_audio_encoder = {argument[len('audio_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('audio_encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')} for key in kwargs_text_encoder: del kwargs['text_encoder_' + key] for key in kwargs_audio_encoder: del kwargs['audio_encoder_' + key] for key in kwargs_decoder: del kwargs['decoder_' + key] text_encoder = kwargs_text_encoder.pop('model', None) if text_encoder is None: if text_encoder_pretrained_model_name_or_path is None: raise ValueError('If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_text_encoder: encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model from a decoder model. Cross-attention and causal mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_text_encoder['config'] = encoder_config text_encoder = AutoModel.from_pretrained(text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder) audio_encoder = kwargs_audio_encoder.pop('model', None) if audio_encoder is None: if audio_encoder_pretrained_model_name_or_path is None: raise ValueError('If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_audio_encoder: encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model from a decoder model. 
Cross-attention and causal mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_audio_encoder['config'] = encoder_config audio_encoder = AutoModel.from_pretrained(audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder) decoder = kwargs_decoder.pop('model', None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_decoder: decoder_config, kwargs_decoder = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True) if isinstance(decoder_config, MusicgenMelodyConfig): decoder_config = decoder_config.decoder if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.") decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder['config'] = decoder_config if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False: logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_sub_models_pretrained(...)`') decoder = MusicgenMelodyForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) config = MusicgenMelodyConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config, **kwargs) return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, input_features: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, MusicgenMelodyOutputWithPast]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) <Tip warning={true}> The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `decoder_input_ids`. </Tip> decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of conditional hidden-states representing the concatenation of the projected text encoder output and the projected audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration >>> import torch >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> inputs = processor( ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... ) >>> pad_token_id = model.generation_config.pad_token_id >>> decoder_input_ids = ( ... torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long) ... * pad_token_id ... 
) >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits >>> logits.shape # (bsz * num_codebooks, encoder_len + tgt_len, vocab_size) torch.Size([8, 249, 2048]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_text_encoder = {argument[len('text_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('text_encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')} if encoder_hidden_states is None: if inputs_embeds is not None or input_ids is not None: encoder_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_text_encoder) encoder_hidden_states = encoder_outputs[0] if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if attention_mask is not None and encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states * attention_mask[..., None] if encoder_hidden_states is not None and input_features is None: input_features = torch.zeros((encoder_hidden_states.shape[0], 1, self.config.num_chroma), device=self.device, dtype=self.dtype) input_features[:, :, 0] = 1 if input_features is not None: audio_hidden_states = input_features if self.config.num_chroma != self.decoder.config.hidden_size: audio_hidden_states = self.audio_enc_to_dec_proj(audio_hidden_states) if audio_hidden_states.shape[1] < self.config.chroma_length: n_repeat = int(math.ceil(self.config.chroma_length / audio_hidden_states.shape[1])) audio_hidden_states = audio_hidden_states.repeat(1, n_repeat, 1) else: logger.warning(f'The conditional audio signal is of length {audio_hidden_states.shape[1]}, which exceeds the maximum chroma duration of {self.config.chroma_length}. The audio will be truncated to {self.config.chroma_length} frames.') audio_hidden_states = audio_hidden_states[:, :self.config.chroma_length] if encoder_hidden_states is not None: encoder_hidden_states = torch.cat([audio_hidden_states, encoder_hidden_states], dim=1) else: encoder_hidden_states = audio_hidden_states if labels is not None and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, labels=labels, **kwargs_decoder) if not return_dict: return decoder_outputs + (encoder_hidden_states,) return MusicgenMelodyOutputWithPast(loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, encoder_hidden_states=encoder_hidden_states) def prepare_inputs_for_generation(self, decoder_input_ids, encoder_hidden_states=None, past_key_values=None, attention_mask=None, decoder_attention_mask=None, decoder_head_mask=None, use_cache=None, decoder_delay_pattern_mask=None, guidance_scale=None, **kwargs): if decoder_delay_pattern_mask is None:
decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(decoder_input_ids, self.generation_config.pad_token_id, max_length=self.generation_config.max_length) decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: decoder_input_ids = decoder_input_ids.repeat((2, 1)) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat((2, 1)) if past_key_values is not None: past_length = past_key_values.get_seq_length() if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] encoder_hidden_states = None return {'input_ids': None, 'encoder_hidden_states': encoder_hidden_states, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_head_mask': decoder_head_mask, 'use_cache': use_cache} def _prepare_decoder_input_ids_for_generation(self, batch_size: int, model_input_name: str, model_kwargs: dict[str, torch.Tensor], decoder_start_token_id: Optional[int]=None, bos_token_id: Optional[int]=None, device: Optional[torch.device]=None) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]: """Prepares `decoder_input_ids` for generation with encoder-decoder models""" if model_kwargs is not None and 'decoder_input_ids' in model_kwargs: decoder_input_ids = model_kwargs.pop('decoder_input_ids') elif 'input_ids' in model_kwargs and model_input_name != 'input_ids': decoder_input_ids = model_kwargs.pop('input_ids') else: decoder_input_ids = None decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) if device is None: device = self.device decoder_input_ids_start = torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device) * decoder_start_token_id if decoder_input_ids is None: decoder_input_ids = decoder_input_ids_start elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item(): decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) if 'decoder_attention_mask' in model_kwargs: decoder_attention_mask = model_kwargs['decoder_attention_mask'] decoder_attention_mask = torch.cat((torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), dim=-1) model_kwargs['decoder_attention_mask'] = decoder_attention_mask return (decoder_input_ids, model_kwargs) def _prepare_encoder_hidden_states_kwargs_for_generation(self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str], generation_config: GenerationConfig) -> dict[str, Any]: encoder_hidden_states = None encoder_attention_mask = model_kwargs.pop('attention_mask') guidance_scale = generation_config.guidance_scale if inputs_tensor is not None: encoder = self.get_text_encoder() if hasattr(encoder, '_hf_hook'): encoder._hf_hook.io_same_device = True irrelevant_prefix = ['decoder_', 'use_cache'] encoder_kwargs = {argument: value for argument, value in model_kwargs.items() if not any((argument.startswith(p) for p in irrelevant_prefix))} encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = 'kwargs' in encoder_signature or 'model_kwargs' in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = {argument: value for argument, value in encoder_kwargs.items() if 
argument in encoder_signature} encoder_kwargs['output_attentions'] = generation_config.output_attentions encoder_kwargs['output_hidden_states'] = generation_config.output_hidden_states model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name encoder_kwargs['return_dict'] = True encoder_kwargs[model_input_name] = inputs_tensor if encoder_attention_mask is not None: encoder_kwargs['attention_mask'] = encoder_attention_mask encoder_hidden_states = encoder(**encoder_kwargs).last_hidden_state if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if guidance_scale is not None and guidance_scale > 1: encoder_hidden_states = torch.concatenate([encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0) if encoder_attention_mask is not None: encoder_attention_mask = torch.concatenate([encoder_attention_mask, torch.zeros_like(encoder_attention_mask)], dim=0) if encoder_attention_mask is not None: encoder_hidden_states = encoder_hidden_states * encoder_attention_mask[..., None] audio_hidden_states = model_kwargs.get('input_features', None) if inputs_tensor is not None: if audio_hidden_states is not None: null_audio_hidden_states = torch.zeros_like(audio_hidden_states) else: null_audio_hidden_states = torch.zeros((inputs_tensor.shape[0], 1, self.config.num_chroma), device=self.device, dtype=self.dtype) null_audio_hidden_states[:, :, 0] = 1 if audio_hidden_states is None: audio_hidden_states = null_audio_hidden_states if audio_hidden_states is not None: if guidance_scale is not None and guidance_scale > 1: audio_hidden_states = torch.concatenate([audio_hidden_states, null_audio_hidden_states], dim=0) if self.config.num_chroma != self.decoder.config.hidden_size: audio_hidden_states = self.audio_enc_to_dec_proj(audio_hidden_states) if audio_hidden_states.shape[1] < self.config.chroma_length: n_repeat = int(math.ceil(self.config.chroma_length / audio_hidden_states.shape[1])) audio_hidden_states = audio_hidden_states.repeat(1, n_repeat, 1) audio_hidden_states = audio_hidden_states[:, :self.config.chroma_length] if encoder_hidden_states is not None: encoder_hidden_states = torch.cat([audio_hidden_states, encoder_hidden_states], dim=1) else: encoder_hidden_states = audio_hidden_states model_kwargs['encoder_hidden_states'] = encoder_hidden_states return model_kwargs def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id) def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError('Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) 
or model.decoder.resize_token_embeddings(...))') def _maybe_initialize_input_ids_for_generation(self, inputs: Optional[torch.Tensor]=None, bos_token_id: Optional[int]=None, model_kwargs: Optional[dict[str, torch.Tensor]]=None) -> torch.LongTensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs if bos_token_id is None: raise ValueError('`bos_token_id` has to be defined when no `input_ids` are provided.') batch_size = 1 for value in model_kwargs.values(): if isinstance(value, torch.Tensor): batch_size = value.shape[0] break return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id def freeze_audio_encoder(self): """ Freeze the audio encoder weights. """ for param in self.audio_encoder.parameters(): param.requires_grad = False self.audio_encoder._requires_grad = False def freeze_text_encoder(self): """ Freeze the text encoder weights. """ for param in self.text_encoder.parameters(): param.requires_grad = False self.text_encoder._requires_grad = False def _get_decoder_start_token_id(self, decoder_start_token_id: Optional[Union[int, list[int]]]=None, bos_token_id: Optional[int]=None) -> int: decoder_start_token_id = decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError('`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation.') @torch.no_grad() def generate(self, inputs: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, logits_processor: Optional[LogitsProcessorList]=None, stopping_criteria: Optional[StoppingCriteriaList]=None, synced_gpus: Optional[bool]=None, streamer: Optional['BaseStreamer']=None, **kwargs): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. 
Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed to avoid deadlocking with `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3). streamer (`BaseStreamer`, *optional*): Streamer object that will be used to stream the generated sequences. Generated tokens are passed through `streamer.put(token_ids)` and the streamer is responsible for any further processing. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. 
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateDecoderOnlyOutput`], - [`~generation.GenerateBeamDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() requires_attention_mask = 'encoder_outputs' not in model_kwargs kwargs_has_attention_mask = model_kwargs.get('attention_mask', None) is not None inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs) batch_size = inputs_tensor.shape[0] self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device) model_kwargs['use_cache'] = generation_config.use_cache model_kwargs['guidance_scale'] = generation_config.guidance_scale if model_kwargs.get('attention_mask', None) is None and requires_attention_mask: model_kwargs['attention_mask'] = self._prepare_attention_mask_for_generation(inputs_tensor, generation_config, model_kwargs) if 'encoder_hidden_states' not in model_kwargs: model_kwargs = self._prepare_encoder_hidden_states_kwargs_for_generation(inputs_tensor, model_kwargs, model_input_name, generation_config) input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(batch_size=batch_size, model_input_name=model_input_name, model_kwargs=model_kwargs, decoder_start_token_id=generation_config._decoder_start_token_tensor, bos_token_id=generation_config._bos_token_tensor, device=inputs_tensor.device) input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get('max_length') is None and generation_config.max_length is not None has_default_min_length = kwargs.get('min_length') is None and generation_config.min_length is not None generation_config = self._prepare_generated_length(generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=inputs_tensor, input_ids_length=input_ids_length) self._validate_generated_length(generation_config, input_ids_length, has_default_max_length) max_cache_length = generation_config.max_length - 1 if inputs_tensor.shape[1] != input_ids_length and model_input_name == 'inputs_embeds' and (not self.config.is_encoder_decoder): max_cache_length += inputs_tensor.shape[1] self._prepare_cache_for_generation(generation_config, model_kwargs, generation_mode=None, batch_size=batch_size, max_cache_length=max_cache_length) input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length) model_kwargs['decoder_delay_pattern_mask'] = decoder_delay_pattern_mask if streamer is not None: streamer.put(input_ids.cpu()) generation_mode = generation_config.get_generation_mode() if generation_config.guidance_scale is not 
None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None logits_processor = self._get_logits_processor(generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=inputs_tensor, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device) stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=stopping_criteria) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): input_ids, model_kwargs = self._expand_inputs_for_generation(input_ids=input_ids, expand_size=generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs) outputs = self._sample(input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs) else: raise ValueError('Got incompatible mode for generation, should be one of greedy or sampling. Ensure that beam search is de-activated by setting `num_beams=1`.') if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs['decoder_delay_pattern_mask']) output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(batch_size, self.decoder.num_codebooks, -1) output_ids = output_ids[None, ...] audio_scales = model_kwargs.get('audio_scales') if audio_scales is None: audio_scales = [None] * batch_size if self.decoder.config.audio_channels == 1: output_values = self.audio_encoder.decode(output_ids, audio_scales=audio_scales).audio_values else: codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales) output_values_left = codec_outputs_left.audio_values codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales) output_values_right = codec_outputs_right.audio_values output_values = torch.cat([output_values_left, output_values_right], dim=1) if generation_config.return_dict_in_generate: outputs.sequences = output_values return outputs else: return output_values
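A usage sketch for the class above, following the checkpoint name and call pattern already shown in its docstrings (`facebook/musicgen-melody`, `AutoProcessor`); the generation settings (`do_sample`, `guidance_scale`, `max_new_tokens`) are illustrative values, not defaults.

```python
from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration

processor = AutoProcessor.from_pretrained("facebook/musicgen-melody")
model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")

# text-only conditioning; guidance_scale > 1 triggers the classifier-free guidance
# branch handled inside generate() above (inputs are duplicated with a null prompt)
inputs = processor(
    text=["80s pop track with bassy drums and synth"],
    padding=True,
    return_tensors="pt",
)
audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)

# generate() returns decoded waveforms; the sampling rate lives on the audio encoder config
sampling_rate = model.config.audio_encoder.sampling_rate
print(audio_values.shape, sampling_rate)
```

For stereo checkpoints (`audio_channels == 2`), the method above decodes even and odd codebooks separately and concatenates the two waveforms along the channel dimension, so the returned tensor carries two channels.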
null
25
8
44
6
28
9
5
0.33
2
17
8
0
21
7
22
22
995
160
627
177
522
210
330
95
307
18
1
3
116
4,117
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyModel
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from ...utils import auto_docstring, is_torch_flex_attn_available, logging import torch.nn as nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring class MusicgenMelodyModel(MusicgenMelodyPreTrainedModel): def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) self.decoder = MusicgenMelodyDecoder(config) self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPast]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict decoder_outputs = self.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs return BaseModelOutputWithPast(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions)
@auto_docstring class MusicgenMelodyModel(MusicgenMelodyPreTrainedModel): def __init__(self, config: MusicgenMelodyDecoderConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPast]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) ''' pass
7
1
11
1
10
0
2
0.06
1
6
3
0
5
1
5
6
63
7
53
22
33
3
20
8
14
6
2
1
10
4,118
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyOutputWithPast
from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache import torch from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable, Optional, Union import torch.nn as nn from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput @dataclass @auto_docstring(custom_intro='\n Base class for Musicgen Melody autoregressive outputs.\n ') class MusicgenMelodyOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of conditional hidden-states representing the concatenation of the projected text encoder output and the projected audio encoder output. Used as a conditional signal. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[torch.FloatTensor] = None
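Since `ModelOutput` subclasses double as dataclasses and tuples, downstream code can read this output either by attribute or by position, with `None` fields skipped in the tuple view. A minimal sketch with made-up shapes; the import path is the module this class lives in, per the record above.

```python
import torch
from transformers.models.musicgen_melody.modeling_musicgen_melody import MusicgenMelodyOutputWithPast

# made-up shapes: (bsz * num_codebooks, seq_len, vocab_size) logits and
# (bsz, conditioning_len, hidden_size) conditioning states
out = MusicgenMelodyOutputWithPast(
    logits=torch.zeros(8, 10, 2048),
    encoder_hidden_states=torch.zeros(2, 12, 1024),
)
print(out.logits.shape)                 # attribute access
print(out[0].shape)                     # positional access; loss is None, so logits comes first
print(out.encoder_hidden_states.shape)
```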
@dataclass @auto_docstring(custom_intro='\n Base class for Musicgen Melody autoregressive outputs.\n ') class MusicgenMelodyOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of conditional hidden-states representing the concatenation of the projected text encoder output and the projected audio encoder output. Used as a conditional signal. ''' pass
3
1
0
0
0
0
0
3.57
1
0
0
0
0
0
0
0
37
5
7
7
6
25
7
7
6
0
1
0
0
4,119
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodyPreTrainedModel
from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import auto_docstring, is_torch_flex_attn_available, logging import torch.nn as nn @auto_docstring class MusicgenMelodyPreTrainedModel(PreTrainedModel): config: MusicgenMelodyDecoderConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['MusicgenMelodyDecoderLayer', 'MusicgenMelodyAttention'] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): std = self.config.initializer_factor if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
@auto_docstring class MusicgenMelodyPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
10
0
10
0
5
0.24
1
0
0
3
1
0
1
1
23
2
17
9
15
4
16
9
14
5
1
2
5
4,120
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
transformers.models.musicgen_melody.modeling_musicgen_melody.MusicgenMelodySinusoidalPositionalEmbedding
import math import torch import torch.nn as nn class MusicgenMelodySinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int): super().__init__() self.embedding_dim = embedding_dim self.make_weights(num_positions, embedding_dim) def make_weights(self, num_embeddings: int, embedding_dim: int): emb_weights = self.get_embedding(num_embeddings, embedding_dim) if hasattr(self, 'weights'): emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer('weights', emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, inputs_embeds: torch.Tensor, past_key_values_length: int=0): bsz, seq_len, _ = inputs_embeds.size() position_ids = (torch.arange(seq_len) + past_key_values_length).to(inputs_embeds.device) if seq_len > self.weights.size(0): self.make_weights(seq_len, self.embedding_dim) return self.weights.index_select(0, position_ids.view(-1)).detach()
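The construction in `get_embedding()` above can be checked in isolation: for an even `embedding_dim` the table is the concatenation `[cos | sin]` along the feature axis, so position 0 is all ones followed by all zeros. The sizes below are arbitrary; this is a sanity sketch that reproduces the same math outside the module.

```python
import math
import torch

num_positions, dim = 16, 8
half_dim = dim // 2

# same frequency schedule as get_embedding(): exp(-log(10000) * i / (half_dim - 1))
inv_freq = torch.exp(torch.arange(half_dim).float() * -(math.log(10000) / (half_dim - 1)))
angles = torch.arange(num_positions).float()[:, None] * inv_freq[None, :]

# note the cos-then-sin ordering used by this class (some implementations use sin-then-cos)
table = torch.cat([torch.cos(angles), torch.sin(angles)], dim=1)
print(table.shape)  # torch.Size([16, 8])
print(table[0])     # tensor([1., 1., 1., 1., 0., 0., 0., 0.])
```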
class MusicgenMelodySinusoidalPositionalEmbedding(nn.Module): '''This module produces sinusoidal positional embeddings of any length.''' def __init__(self, num_positions: int, embedding_dim: int): pass def make_weights(self, num_embeddings: int, embedding_dim: int): pass @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int): ''' Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". ''' pass @torch.no_grad() def forward(self, inputs_embeds: torch.Tensor, past_key_values_length: int=0): pass
7
2
9
0
7
2
2
0.34
1
3
0
0
3
2
4
14
44
5
29
14
22
10
27
12
22
2
1
1
7
4,121
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/musicgen_melody/processing_musicgen_melody.py
transformers.models.musicgen_melody.processing_musicgen_melody.MusicgenMelodyProcessor
from ...utils import to_numpy import numpy as np from ...processing_utils import ProcessorMixin from typing import Any from ...utils.import_utils import requires @requires(backends=('torchaudio',)) class MusicgenMelodyProcessor(ProcessorMixin): """ Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor class. [`MusicgenProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information. Args: feature_extractor (`MusicgenMelodyFeatureExtractor`): An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input. tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = 'MusicgenMelodyFeatureExtractor' tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast') def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, *args, **kwargs): """ Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text` argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ if len(args) > 0: kwargs['audio'] = args[0] return super().__call__(*args, **kwargs) def batch_decode(self, *args, **kwargs): """ This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ audio_values = kwargs.pop('audio', None) attention_mask = kwargs.pop('attention_mask', None) if len(args) > 0: audio_values = args[0] args = args[1:] if audio_values is not None: return self._decode_audio(audio_values, attention_mask=attention_mask) else: return self.tokenizer.batch_decode(*args, **kwargs) def _decode_audio(self, audio_values, attention_mask: Any=None) -> list[np.ndarray]: """ This method strips any padding from the audio values to return a list of numpy audio arrays. """ audio_values = to_numpy(audio_values) bsz, channels, seq_len = audio_values.shape if attention_mask is None: return list(audio_values) attention_mask = to_numpy(attention_mask) difference = seq_len - attention_mask.shape[-1] padding_value = 1 - self.feature_extractor.padding_value attention_mask = np.pad(attention_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value) audio_values = audio_values.tolist() for i in range(bsz): sliced_audio = np.asarray(audio_values[i])[attention_mask[i][None, :] != self.feature_extractor.padding_value] audio_values[i] = sliced_audio.reshape(channels, -1) return audio_values def get_unconditional_inputs(self, num_samples=1, return_tensors='pt'): """ Helper function to get null inputs for unconditional generation, enabling the model to be used without the feature extractor or tokenizer. Args: num_samples (int, *optional*): Number of audio samples to unconditionally generate. 
Example: ```python >>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> # get the unconditional (or 'null') inputs for the model >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody") >>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```""" inputs = self.tokenizer([''] * num_samples, return_tensors=return_tensors, return_attention_mask=True) inputs['attention_mask'][:] = 0 return inputs
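The padding-stripping idea in `_decode_audio()` above (keep only the waveform samples whose attention-mask entry differs from the feature extractor's padding value, per batch item) can be shown with a toy numpy sketch. The array sizes and the `padding_value` below are made up.

```python
import numpy as np

padding_value = 0.0
audio_values = np.random.randn(2, 1, 6)          # (bsz, channels, seq_len)
attention_mask = np.array([[1, 1, 1, 1, 0, 0],
                           [1, 1, 1, 0, 0, 0]])  # 0 marks padded samples

trimmed = []
for i in range(audio_values.shape[0]):
    keep = attention_mask[i] != padding_value    # boolean mask over time steps
    trimmed.append(audio_values[i][:, keep])     # slice each channel to the kept samples
print([a.shape for a in trimmed])                # [(1, 4), (1, 3)]
```

The result is a Python list rather than a batched array because each item can end up with a different length once its padding is removed, which is also why `_decode_audio()` returns a list of numpy arrays.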
@requires(backends=('torchaudio',)) class MusicgenMelodyProcessor(ProcessorMixin): ''' Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor class. [`MusicgenProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information. Args: feature_extractor (`MusicgenMelodyFeatureExtractor`): An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input. tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. ''' def __init__(self, feature_extractor, tokenizer): pass def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): pass def __call__(self, *args, **kwargs): ''' Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text` argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. ''' pass def batch_decode(self, *args, **kwargs): ''' This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. ''' pass def _decode_audio(self, audio_values, attention_mask: Any=None) -> list[np.ndarray]: ''' This method strips any padding from the audio values to return a list of numpy audio arrays. ''' pass def get_unconditional_inputs(self, num_samples=1, return_tensors='pt'): ''' Helper function to get null inputs for unconditional generation, enabling the model to be used without the feature extractor or tokenizer. Args: num_samples (int, *optional*): Number of audio samples to unconditionally generate. Example: ```python >>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> # get the unconditional (or 'null') inputs for the model >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody") >>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```''' pass
8
5
17
3
7
7
2
1.24
1
4
0
0
7
0
7
24
149
28
54
21
46
67
49
21
41
6
2
1
16
4,122
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/configuration_mvp.py
transformers.models.mvp.configuration_mvp.MvpConfig
import warnings from ...configuration_utils import PretrainedConfig class MvpConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate a MVP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MvpModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
use_prompt (`bool`, *optional*, defaults to `False`): Whether or not to use prompt. prompt_length (`int`, *optional*, defaults to 100): The length of prompt. prompt_mid_dim (`int`, *optional*, defaults to 800): Dimensionality of the "intermediate" layer in prompt. Example: ```python >>> from transformers import MvpConfig, MvpModel >>> # Initializing a MVP RUCAIBox/mvp style configuration >>> configuration = MvpConfig() >>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration >>> model = MvpModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'mvp' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding self.use_prompt = use_prompt self.prompt_length = prompt_length self.prompt_mid_dim = prompt_mid_dim super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False): self.forced_bos_token_id = self.bos_token_id warnings.warn(f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. The config can simply be saved and uploaded again to be fixed.')
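As a complement to the docstring example above, a sketch of the prompt-tuning options (`use_prompt`, `prompt_length`, `prompt_mid_dim`); the values used here are the documented defaults and purely illustrative:

```python
from transformers import MvpConfig, MvpModel

# Prompt-based lightweight-tuning configuration (illustrative values).
config = MvpConfig(use_prompt=True, prompt_length=100, prompt_mid_dim=800)
model = MvpModel(config)

# attribute_map aliases declared on the class resolve to the underlying fields:
print(config.hidden_size, config.num_attention_heads)  # 1024 16
```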
class MvpConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate a MVP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MvpModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. use_prompt (`bool`, *optional*, defaults to `False`): Whether or not to use prompt. 
prompt_length (`int`, *optional*, defaults to 100): The length of prompt. prompt_mid_dim (`int`, *optional*, defaults to 800): Dimensionality of the "intermediate" layer in prompt. Example: ```python >>> from transformers import MvpConfig, MvpModel >>> # Initializing a MVP RUCAIBox/mvp style configuration >>> configuration = MvpConfig() >>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration >>> model = MvpModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs): pass
2
1
72
2
70
1
2
0.96
1
1
0
0
1
24
1
1
155
11
74
60
41
71
32
29
30
2
1
1
2
4,123
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpAttention
from ...utils.deprecation import deprecate_kwarg from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union class MvpAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, layer_idx: Optional[bool]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, attn_prompt: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True if attn_prompt is not None: key_states = torch.cat([attn_prompt[0].expand(bsz, -1, -1, -1), key_states], dim=2) value_states = torch.cat([attn_prompt[1].expand(bsz, -1, -1, -1), value_states], dim=2) if attention_mask is not None: prompt_mask = torch.zeros(bsz, 1, tgt_len, attn_prompt[0].size(1)).to(attention_mask.device) attention_mask = torch.cat([prompt_mask, attention_mask], dim=-1) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = query_states.view(bsz, tgt_len, self.num_heads, 
self.head_dim).transpose(1, 2) query_states = query_states.reshape(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped)
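A stand-alone shape check for the attention module above, assuming the internal import path `transformers.models.mvp.modeling_mvp`; with no cache, prompt, or masks, the forward pass reduces to plain multi-head attention:

```python
import torch
from transformers.models.mvp.modeling_mvp import MvpAttention

attn = MvpAttention(embed_dim=16, num_heads=4)
hidden = torch.randn(2, 5, 16)  # (batch, tgt_len, embed_dim)

attn_output, attn_weights = attn(hidden, output_attentions=True)
print(attn_output.shape)   # torch.Size([2, 5, 16])
print(attn_weights.shape)  # torch.Size([2, 4, 5, 5])  (batch, heads, tgt_len, src_len)
```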
class MvpAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, layer_idx: Optional[bool]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, attn_prompt: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
4
2
49
7
35
7
6
0.22
1
6
0
0
3
10
3
13
153
24
106
42
86
23
72
26
68
14
1
2
17
4,124
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpClassificationHead
import torch from torch import nn class MvpClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states
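A minimal sketch of the head above: it expects a pooled hidden state per example and returns class logits (sizes are illustrative):

```python
import torch
from transformers.models.mvp.modeling_mvp import MvpClassificationHead

head = MvpClassificationHead(input_dim=8, inner_dim=8, num_classes=3, pooler_dropout=0.0)
logits = head(torch.randn(2, 8))  # dropout -> dense -> tanh -> dropout -> out_proj
print(logits.shape)  # torch.Size([2, 3])
```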
class MvpClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
1
9
0
9
0
1
0.05
1
4
0
0
2
3
2
12
22
2
19
12
10
1
13
6
10
1
1
0
2
4,125
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpDecoder
from .configuration_mvp import MvpConfig from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Optional, Union import math from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask class MvpDecoder(MvpPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`] Args: config: MvpConfig embed_tokens (nn.Embedding): output embedding use_prompt (bool): whether to use prompt """ def __init__(self, config: MvpConfig, embed_tokens: Optional[nn.Embedding]=None, use_prompt: Optional[bool]=False): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = MvpLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([MvpDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.use_prompt = use_prompt if use_prompt: self.prompt_length = config.prompt_length self.self_attn_prompt = MvpPrompt(config, config.decoder_layers, config.decoder_attention_heads) self.cross_attn_prompt = MvpPrompt(config, config.decoder_layers, config.decoder_attention_heads) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_shape = input_ids.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.use_prompt: prompt_ids = torch.arange(self.prompt_length).to(self.device) self_attn_prompt = self.self_attn_prompt(prompt_ids) cross_attn_prompt = self.cross_attn_prompt(prompt_ids) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, 
cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, self_attn_prompt=self_attn_prompt[idx] if self.use_prompt else None, cross_attn_prompt=cross_attn_prompt[idx] if self.use_prompt else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
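A hedged sketch of running the decoder stand-alone (no encoder states, no cache) with a deliberately tiny configuration; all sizes are illustrative:

```python
import torch
from transformers import MvpConfig
from transformers.models.mvp.modeling_mvp import MvpDecoder

config = MvpConfig(
    vocab_size=64, d_model=16, max_position_embeddings=32,
    encoder_layers=2, encoder_attention_heads=2, encoder_ffn_dim=32,
    decoder_layers=2, decoder_attention_heads=2, decoder_ffn_dim=32,
)
decoder = MvpDecoder(config)

input_ids = torch.randint(0, 64, (1, 6))
out = decoder(input_ids=input_ids, use_cache=False)
print(out.last_hidden_state.shape)  # torch.Size([1, 6, 16])
```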
class MvpDecoder(MvpPreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`] Args: config: MvpConfig embed_tokens (nn.Embedding): output embedding use_prompt (bool): whether to use prompt ''' def __init__(self, config: MvpConfig, embed_tokens: Optional[nn.Embedding]=None, use_prompt: Optional[bool]=False): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
3
2
67
9
42
16
12
0.42
1
13
5
0
4
14
4
6
280
41
168
52
147
71
88
36
83
42
2
3
48
4,126
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpDecoderLayer
from .configuration_mvp import MvpConfig from torch import nn from ...utils.deprecation import deprecate_kwarg import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_layers import GradientCheckpointingLayer from ...activations import ACT2FN from typing import Optional, Union class MvpDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MvpConfig, layer_idx=None): super().__init__() self.embed_dim = config.d_model self.self_attn = MvpAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = MvpAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, self_attn_prompt: Optional[torch.Tensor]=None, cross_attn_prompt: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape `(2, decoder_attention_heads, pro_len, head_dim)`. cross_attn_prompt (`torch.FloatTensor`): prompt of cross attention of shape `(2, decoder_attention_heads, pro_len, head_dim)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, attn_prompt=self_attn_prompt, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, attn_prompt=cross_attn_prompt, past_key_values=past_key_values, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
class MvpDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MvpConfig, layer_idx=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, self_attn_prompt: Optional[torch.Tensor]=None, cross_attn_prompt: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape `(2, decoder_attention_heads, pro_len, head_dim)`. cross_attn_prompt (`torch.FloatTensor`): prompt of cross attention of shape `(2, decoder_attention_heads, pro_len, head_dim)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
4
1
61
6
41
15
4
0.35
1
5
2
0
2
11
2
12
123
12
82
34
66
29
44
21
41
6
1
1
7
4,127
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpDecoderWrapper
class MvpDecoderWrapper(MvpPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = MvpDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
class MvpDecoderWrapper(MvpPreTrainedModel): ''' This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. ''' def __init__(self, config): pass def forward(self, *args, **kwargs): pass
3
1
3
0
3
0
1
0.67
1
2
1
0
2
1
2
4
12
2
6
4
3
4
6
4
3
1
2
0
2
4,128
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpEncoder
from .configuration_mvp import MvpConfig from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from typing import Optional, Union import math from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask class MvpEncoder(MvpPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`MvpEncoderLayer`]. Args: config: MvpConfig embed_tokens (nn.Embedding): output embedding use_prompt (bool): whether to use prompt """ def __init__(self, config: MvpConfig, embed_tokens: Optional[nn.Embedding]=None, use_prompt: Optional[bool]=False): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = MvpLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([MvpEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.use_prompt = use_prompt if use_prompt: self.prompt_length = config.prompt_length self.self_attn_prompt = MvpPrompt(config, config.encoder_layers, config.encoder_attention_heads) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.use_prompt: prompt_ids = torch.arange(self.prompt_length).to(self.device) self_attn_prompt = self.self_attn_prompt(prompt_ids) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, self_attn_prompt=self_attn_prompt[idx] if self.use_prompt else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
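The encoder follows the same pattern as the decoder; a tiny stand-alone sketch with illustrative sizes:

```python
import torch
from transformers import MvpConfig
from transformers.models.mvp.modeling_mvp import MvpEncoder

config = MvpConfig(
    vocab_size=64, d_model=16, max_position_embeddings=32,
    encoder_layers=2, encoder_attention_heads=2, encoder_ffn_dim=32,
    decoder_layers=2, decoder_attention_heads=2, decoder_ffn_dim=32,
)
encoder = MvpEncoder(config)

out = encoder(input_ids=torch.randint(0, 64, (1, 7)))
print(out.last_hidden_state.shape)  # torch.Size([1, 7, 16])
```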
class MvpEncoder(MvpPreTrainedModel): ''' Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`MvpEncoderLayer`]. Args: config: MvpConfig embed_tokens (nn.Embedding): output embedding use_prompt (bool): whether to use prompt ''' def __init__(self, config: MvpConfig, embed_tokens: Optional[nn.Embedding]=None, use_prompt: Optional[bool]=False): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
3
2
45
7
29
10
8
0.39
1
12
5
0
4
13
4
6
194
31
118
41
102
46
74
30
69
27
2
3
33
4,129
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpEncoderLayer
from .configuration_mvp import MvpConfig from torch import nn import torch from ...modeling_layers import GradientCheckpointingLayer from ...activations import ACT2FN from typing import Optional, Union class MvpEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: MvpConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = MvpAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, self_attn_prompt: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape `(2, encoder_attention_heads, pro_len, head_dim)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, attn_prompt=self_attn_prompt, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return (hidden_states, attn_weights)
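Note that, unlike the decoder layer, the encoder layer's mask, head-mask and prompt arguments are positional without defaults, so `None` must be passed explicitly when they are unused; a minimal sketch:

```python
import torch
from transformers import MvpConfig
from transformers.models.mvp.modeling_mvp import MvpEncoderLayer

config = MvpConfig(d_model=16, encoder_attention_heads=2, encoder_ffn_dim=32)
layer = MvpEncoderLayer(config)

hidden = torch.randn(2, 5, 16)
# attention_mask, layer_head_mask, self_attn_prompt are unused here but positional.
out, attn_weights = layer(hidden, None, None, None)
print(out.shape)  # torch.Size([2, 5, 16]); attn_weights is None without output_attentions
```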
class MvpEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: MvpConfig): pass def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, self_attn_prompt: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape `(2, encoder_attention_heads, pro_len, head_dim)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
3
1
34
3
25
7
2
0.25
1
4
2
0
2
9
2
12
70
6
51
23
41
13
32
16
29
3
1
1
4
4,130
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpForCausalLM
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...generation import GenerationMixin from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils import auto_docstring, logging from typing import Optional, Union class MvpForCausalLM(MvpPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = MvpDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.lm_head.requires_grad_(False) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: """ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example: ```python >>> from transformers import AutoTokenizer, MvpForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 8, 50267] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
class MvpForCausalLM(MvpPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass def set_lightweight_tuning(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, MvpForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 8, 50267] ```''' pass
9
1
17
3
8
7
2
0.8
2
6
2
0
9
2
10
12
188
35
85
38
57
68
44
21
33
7
2
1
17
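A minimal sketch of the loss computation in the `MvpForCausalLM` record above: the stored `forward` flattens the logits to `(batch * seq_len, vocab_size)` and compares them directly against the labels as passed, with no internal shift. The shapes and random tensors below are illustrative only.

```python
import torch
from torch.nn import CrossEntropyLoss

# Toy shapes: batch 1, sequence 8, vocabulary 50267 (the logits shape shown in the record's doctest).
logits = torch.randn(1, 8, 50267)
labels = torch.randint(0, 50267, (1, 8))

loss_fct = CrossEntropyLoss()
# Flatten to (batch * seq_len, vocab_size) against (batch * seq_len,), as in the stored forward().
loss = loss_fct(logits.view(-1, 50267), labels.view(-1))
print(loss.shape)  # torch.Size([]) -- a scalar
```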
4,131
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpForConditionalGeneration
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from .configuration_mvp import MvpConfig from ...generation import GenerationMixin from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils import auto_docstring, logging from typing import Optional, Union @auto_docstring(custom_intro='\n The MVP Model with a language modeling head. Can be used for various text generation tasks.\n ') class MvpForConditionalGeneration(MvpPreTrainedModel, GenerationMixin): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: MvpConfig): super().__init__(config) self.model = MvpModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.lm_head.requires_grad_(False) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqLMOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example of summarization: Fine-tuning a model ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp") >>> inputs = tokenizer( ... "Summarize: You may want to stick it to your boss and leave your job, but don't do it if these are your reasons.", ... return_tensors="pt", ... ) >>> labels = tokenizer("Bad Reasons To Quit Your Job", return_tensors="pt")["input_ids"] >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... 
generated_ids = model.generate(**inputs) >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@auto_docstring(custom_intro='\n The MVP Model with a language modeling head. Can be used for various text generation tasks.\n ') class MvpForConditionalGeneration(MvpPreTrainedModel, GenerationMixin): def __init__(self, config: MvpConfig): pass def get_encoder(self): pass def get_decoder(self): pass def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: pass def _resize_final_logits_bias(self, new_num_tokens: int) -> None: pass def set_lightweight_tuning(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqLMOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example of summarization: Fine-tuning a model ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp") >>> inputs = tokenizer( ... "Summarize: You may want to stick it to your boss and leave your job, but don't do it if these are your reasons.", ... return_tensors="pt", ... ) >>> labels = tokenizer("Bad Reasons To Quit Your Job", return_tensors="pt")["input_ids"] >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... generated_ids = model.generate(**inputs) >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` ''' pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass
11
1
11
1
10
1
2
0.08
2
8
3
0
10
3
11
13
138
18
111
49
75
9
55
26
43
8
2
2
20
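When `labels` are given and neither `decoder_input_ids` nor `decoder_inputs_embeds` is passed, the `MvpForConditionalGeneration` record above derives the decoder inputs with `shift_tokens_right(labels, pad_token_id, decoder_start_token_id)`. That helper's body is not part of this record; the sketch below assumes the conventional BART-style shift and uses made-up token ids.

```python
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    # Move every token one position to the right and put the decoder start token in front.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # Positions marked -100 (ignored by the loss) cannot be embedded, so map them to the pad id.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[42, 43, 44, 2, -100]])  # 2 = eos, -100 = ignored position (toy values)
print(shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2))
# tensor([[ 2, 42, 43, 44,  2]])
```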
4,132
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpForQuestionAnswering
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...utils import auto_docstring, logging from typing import Optional, Union @auto_docstring class MvpForQuestionAnswering(MvpPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = MvpModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.qa_outputs.requires_grad_(False) @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqQuestionAnsweringModelOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
Example: Fine-tuning a model for extrative question answering, and our model also supports generative question answering using `BartForConditionalGeneration` ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForQuestionAnswering >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForQuestionAnswering.from_pretrained("RUCAIBox/mvp") >>> inputs = tokenizer( ... "Answer the following question: Who was Jim Henson? [SEP] Jim Henson was a nice puppet", ... return_tensors="pt", ... ) >>> target_start_index = torch.tensor([18]) >>> target_end_index = torch.tensor([19]) >>> loss = model(**inputs, start_positions=target_start_index, end_positions=target_end_index).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> predict_answer = tokenizer.decode(predict_answer_tokens) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring class MvpForQuestionAnswering(MvpPreTrainedModel): def __init__(self, config): pass def set_lightweight_tuning(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqQuestionAnsweringModelOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: Fine-tuning a model for extrative question answering, and our model also supports generative question answering using `BartForConditionalGeneration` ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForQuestionAnswering >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForQuestionAnswering.from_pretrained("RUCAIBox/mvp") >>> inputs = tokenizer( ... "Answer the following question: Who was Jim Henson? [SEP] Jim Henson was a nice puppet", ... return_tensors="pt", ... ) >>> target_start_index = torch.tensor([18]) >>> target_end_index = torch.tensor([19]) >>> loss = model(**inputs, start_positions=target_start_index, end_positions=target_end_index).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> predict_answer = tokenizer.decode(predict_answer_tokens) ``` ''' pass
6
1
36
3
28
4
3
0.15
1
5
2
0
3
3
3
5
114
13
88
37
64
13
39
18
35
8
2
2
10
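A shape walk-through of the span head in the `MvpForQuestionAnswering` record above: `qa_outputs` maps the hidden size to two logits per position, the result is split into start and end logits, and target positions outside the sequence are clamped to `seq_len` so that `CrossEntropyLoss(ignore_index=seq_len)` skips them. All sizes below are toy values.

```python
import torch
from torch import nn
from torch.nn import CrossEntropyLoss

sequence_output = torch.randn(2, 6, 8)          # (batch, seq_len, hidden) -- toy sizes
qa_outputs = nn.Linear(8, 2)                    # num_labels is forced to 2 in __init__

logits = qa_outputs(sequence_output)            # (batch, seq_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()   # (batch, seq_len)
end_logits = end_logits.squeeze(-1).contiguous()

start_positions = torch.tensor([1, 99])         # 99 is deliberately out of range
end_positions = torch.tensor([3, 99])
ignored_index = start_logits.size(1)            # 6
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)   # clamped targets are ignored
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
```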
4,133
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpForSequenceClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from .configuration_mvp import MvpConfig import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...utils import auto_docstring, logging from typing import Optional, Union @auto_docstring(custom_intro='\n Mvp model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ') class MvpForSequenceClassification(MvpPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: MvpConfig, **kwargs): super().__init__(config, **kwargs) self.model = MvpModel(config) self.classification_head = MvpClassificationHead(config.d_model, config.d_model, config.num_labels, config.classifier_dropout) self.post_init() def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.classification_head.requires_grad_(False) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Example of single-label classification: Fine-tuning a model on `num_labels` classes ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForSequenceClassification >>> num_labels = 2 # for example, this is a binary classification task >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForSequenceClassification.from_pretrained("RUCAIBox/mvp", num_labels=num_labels) >>> inputs = tokenizer("Classify: Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor(1) # the real label for inputs >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax() ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of <eos> tokens.') sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, 
encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n Mvp model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ') class MvpForSequenceClassification(MvpPreTrainedModel): def __init__(self, config: MvpConfig, **kwargs): pass def set_lightweight_tuning(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Example of single-label classification: Fine-tuning a model on `num_labels` classes ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForSequenceClassification >>> num_labels = 2 # for example, this is a binary classification task >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForSequenceClassification.from_pretrained("RUCAIBox/mvp", num_labels=num_labels) >>> inputs = tokenizer("Classify: Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor(1) # the real label for inputs >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax() ``` ''' pass
6
1
37
3
32
2
6
0.07
1
10
4
0
3
2
3
5
118
11
101
33
78
7
43
15
39
15
2
3
17
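The `MvpForSequenceClassification` record above pools its sentence representation from the decoder hidden state at the last `<eos>` token of each row. A minimal re-enactment of that indexing, assuming `eos_token_id=2` and toy tensors:

```python
import torch

eos_token_id = 2
input_ids = torch.tensor([[5, 6, 7, 8, 2],
                          [5, 6, 2, 1, 1]])     # every row must contain the same number of eos tokens
hidden_states = torch.arange(2 * 5 * 3, dtype=torch.float).view(2, 5, 3)

eos_mask = input_ids.eq(eos_token_id)
# Select the hidden states at eos positions, then keep the last eos of each row.
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[:, -1, :]
print(sentence_representation.shape)  # torch.Size([2, 3])
```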
4,134
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpLearnedPositionalEmbedding
import torch from typing import Optional, Union from torch import nn class MvpLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None): """`input_ids`' shape is expected to be [bsz x seqlen].""" if position_ids is None: bsz, seq_len = input_ids.shape[:2] position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device).expand(bsz, -1) else: position_ids = position_ids.unsqueeze(0) return super().forward(position_ids + self.offset)
class MvpLearnedPositionalEmbedding(nn.Embedding): ''' This module learns positional embeddings up to a fixed maximum size. ''' def __init__(self, num_embeddings: int, embedding_dim: int): pass def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None): '''`input_ids`' shape is expected to be [bsz x seqlen].''' pass
3
2
7
1
5
2
1
0.6
1
3
0
0
2
1
2
2
20
4
10
6
7
6
8
6
5
1
1
0
2
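The `MvpLearnedPositionalEmbedding` record above hard-codes an offset of 2: the table is allocated with two extra rows and every position id is shifted by 2 before lookup. A toy re-enactment, assuming 10 positions and an embedding dim of 4:

```python
import torch
from torch import nn

offset = 2
emb = nn.Embedding(10 + offset, 4)               # two spare rows, as in __init__

input_ids = torch.zeros(1, 6, dtype=torch.long)  # only the shape of input_ids matters for the lookup
bsz, seq_len = input_ids.shape
past_key_values_length = 3                       # e.g. three tokens already cached
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len).expand(bsz, -1)
positions = emb(position_ids + offset)           # rows 5..10 of the table, shape (1, 6, 4)
```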
4,135
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpModel
from .configuration_mvp import MvpConfig from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils import auto_docstring, logging from typing import Optional, Union @auto_docstring class MvpModel(MvpPreTrainedModel): _keys_to_ignore_on_load_unexpected = ['final_logits_bias'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: MvpConfig): super().__init__(config) padding_idx, vocab_size = (config.pad_token_id, config.vocab_size) self.use_prompt = config.use_prompt self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = MvpEncoder(config, self.shared, config.use_prompt) self.decoder = MvpDecoder(config, self.shared, config.use_prompt) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def set_lightweight_tuning(self): assert self.use_prompt, 'If you want to use lightweight tuning, make sure that `use_prompt=True`.' self.requires_grad_(False) self.encoder.self_attn_prompt.requires_grad_(True) self.decoder.self_attn_prompt.requires_grad_(True) self.decoder.cross_attn_prompt.requires_grad_(True) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqModelOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, self.config.decoder_start_token_id) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class MvpModel(MvpPreTrainedModel): def __init__(self, config: MvpConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_encoder(self): pass def set_lightweight_tuning(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqModelOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. ''' pass
9
1
16
1
14
1
3
0.05
1
9
5
0
7
4
7
9
132
17
110
34
78
5
43
16
35
12
2
2
18
4,136
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpPreTrainedModel
from .configuration_mvp import MvpConfig from torch import nn import torch from ...utils import auto_docstring, logging from ...modeling_utils import PreTrainedModel @auto_docstring class MvpPreTrainedModel(PreTrainedModel): config: MvpConfig base_model_prefix = 'model' supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids} return dummy_inputs
@auto_docstring class MvpPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass @property def dummy_inputs(self): pass
5
0
9
0
9
0
3
0
1
0
0
8
2
0
2
2
25
2
23
11
19
0
18
10
15
5
1
2
6
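A brief re-enactment of the embedding branch of `_init_weights` stored in the `MvpPreTrainedModel` record above: weights are drawn from a normal distribution with `std = config.init_std` and the padding row is zeroed. The values `init_std=0.02`, the table size, and `padding_idx=1` are illustrative only.

```python
import torch
from torch import nn

std = 0.02
emb = nn.Embedding(10, 4, padding_idx=1)
emb.weight.data.normal_(mean=0.0, std=std)
if emb.padding_idx is not None:
    emb.weight.data[emb.padding_idx].zero_()     # the padding row is reset to zeros
print(emb.weight[1])                             # tensor([0., 0., 0., 0.], ...)
```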
4,137
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/modeling_mvp.py
transformers.models.mvp.modeling_mvp.MvpPrompt
from torch import nn import torch class MvpPrompt(nn.Module): """Layer-wise prompt for encoder or decoder.""" def __init__(self, config, num_layers, num_heads): super().__init__() self.prompt_length = config.prompt_length self.num_layers = num_layers self.num_heads = num_heads self.head_dim = config.d_model // num_heads self.dropout = nn.Dropout(p=config.dropout) self.prompt_embedding = nn.Embedding(config.prompt_length, config.d_model) self.prompt_trans = nn.Sequential(nn.Linear(config.d_model, config.prompt_mid_dim), nn.GELU(), nn.Linear(config.prompt_mid_dim, num_layers * 2 * config.d_model)) def forward(self, prompt_ids: torch.Tensor) -> tuple[torch.Tensor]: prompt = self.prompt_trans(self.prompt_embedding(prompt_ids)) prompt = prompt.view(self.prompt_length, self.num_layers * 2, self.num_heads, self.head_dim) prompt = self.dropout(prompt) prompt = prompt.permute([1, 2, 0, 3]).split(2) return prompt
class MvpPrompt(nn.Module): '''Layer-wise prompt for encoder or decoder.''' def __init__(self, config, num_layers, num_heads): pass def forward(self, prompt_ids: torch.Tensor) -> tuple[torch.Tensor]: pass
3
1
10
0
10
0
1
0.05
1
2
0
0
2
7
2
12
23
2
20
11
17
1
16
11
13
1
1
0
2
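Tracing the tensor shapes through `MvpPrompt.forward` as stored above, with illustrative sizes (`prompt_length=4`, `num_layers=6`, `num_heads=8`, `d_model=16`, `prompt_mid_dim=32`): the MLP expands each prompt embedding to `num_layers * 2 * d_model` values, and the final permute/split yields one `(2, num_heads, prompt_length, head_dim)` key/value pair per layer, the `self_attn_prompt` shape documented in the `MvpEncoderLayer` record above.

```python
import torch
from torch import nn

prompt_length, num_layers, num_heads, d_model, mid = 4, 6, 8, 16, 32
head_dim = d_model // num_heads                  # 2

prompt_embedding = nn.Embedding(prompt_length, d_model)
prompt_trans = nn.Sequential(nn.Linear(d_model, mid), nn.GELU(), nn.Linear(mid, num_layers * 2 * d_model))

prompt_ids = torch.arange(prompt_length)
prompt = prompt_trans(prompt_embedding(prompt_ids))                       # (4, 192); dropout omitted here
prompt = prompt.view(prompt_length, num_layers * 2, num_heads, head_dim)  # (4, 12, 8, 2)
prompt = prompt.permute([1, 2, 0, 3]).split(2)                            # 6 chunks of (2, 8, 4, 2)
print(len(prompt), prompt[0].shape)  # 6 torch.Size([2, 8, 4, 2])
```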
4,138
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/tokenization_mvp.py
transformers.models.mvp.tokenization_mvp.MvpTokenizer
from ...tokenization_utils import AddedToken, PreTrainedTokenizer import json import os import regex as re from typing import Optional class MvpTokenizer(PreTrainedTokenizer): """ Constructs a MVP tokenizer, which is smilar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import MvpTokenizer >>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (MVP tokenizer detect beginning of words by the preceding space). 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c 
in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MVP sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs)
class MvpTokenizer(PreTrainedTokenizer): ''' Constructs a MVP tokenizer, which is smilar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import MvpTokenizer >>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (MVP tokenizer detect beginning of words by the preceding space). 
''' def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): pass @property def vocab_size(self): pass def get_vocab(self): pass def bpe(self, token): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MVP sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): pass
15
8
17
1
13
4
3
0.62
1
11
0
0
13
9
13
102
318
50
167
71
132
104
122
46
108
9
3
3
39
4,139
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mvp/tokenization_mvp_fast.py
transformers.models.mvp.tokenization_mvp_fast.MvpTokenizerFast
from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...tokenization_utils_base import AddedToken, BatchEncoding from tokenizers import processors from typing import Optional import json from .tokenization_mvp import MvpTokenizer class MvpTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import MvpTokenizerFast >>> tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. 
This allows to treat the leading word just as any other word. (MVP tokenizer detect beginning of words by the preceding space). trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = MvpTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs) tokenizer_component = 'post_processor' tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) if 'sep' in state: state['sep'] = tuple(state['sep']) if 'cls' in state: state['cls'] = tuple(state['cls']) changes_to_apply = False if state.get('add_prefix_space', add_prefix_space) != add_prefix_space: state['add_prefix_space'] = add_prefix_space changes_to_apply = True if state.get('trim_offsets', trim_offsets) != trim_offsets: state['trim_offsets'] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop('type')) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def mask_token(self) -> str: """ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. MVP tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. """ if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): """ Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Mvp. 
""" value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if is_split_into_words and (not self.add_prefix_space): raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if is_split_into_words and (not self.add_prefix_space): raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
class MvpTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import MvpTokenizerFast >>> tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (MVP tokenizer detect beginning of words by the preceding space). 
trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. ''' def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): pass @property def mask_token(self) -> str: ''' `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. MVP tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. ''' pass @mask_token.setter def mask_token(self) -> str: ''' Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Mvp. ''' pass def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def _encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass
11
4
18
2
13
3
4
0.82
1
6
1
0
8
1
8
96
237
44
106
45
77
87
65
25
56
14
3
2
28
4,140
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/myt5/tokenization_myt5.py
transformers.models.myt5.tokenization_myt5.ByteRewriter
import json from collections import defaultdict from typing import Optional, Union class ByteRewriter: """ Byte rewriter class for MyT5 tokenizer. This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules. Args: rewriting_rules (`str` or `dict[str, str]`): A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules. """ LEAF = '[LEAF]' def __init__(self, rewriting_rules: Union[str, dict[str, str]]): if isinstance(rewriting_rules, str): with open(rewriting_rules, 'r') as f: rewriting_rules = json.load(f) elif not isinstance(rewriting_rules, dict): raise TypeError(f'rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}') self.hash_tree = self.construct_hash_tree(rewriting_rules) reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()} self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules) def add_leaf(self, hash_tree: dict[str, Union[dict, list[str]]], byte_in_sequence: str, byte_out_sequence: str): """ Add a leaf with the output byte sequence to the hash tree. """ byte_in_list = byte_in_sequence.split(' ') byte_out_list = byte_out_sequence.split(' ') tree_pointer = hash_tree for b in byte_in_list: if b not in tree_pointer: tree_pointer[b] = {} tree_pointer = tree_pointer[b] tree_pointer[self.LEAF] = byte_out_list def construct_hash_tree(self, rewriting_rules: dict[str, str]) -> dict[str, Union[dict, list[str]]]: """ Construct a hash tree for rewritten byte sequences. """ hash_tree = defaultdict(dict) for b in (f'{x:02x}' for x in range(256)): hash_tree[b][self.LEAF] = [b] for in_sequence, out_sequence in rewriting_rules.items(): self.add_leaf(hash_tree, in_sequence, out_sequence) return hash_tree def search_hash_tree(self, byte_sequence: list[str]) -> Union[None, list[str]]: """ Search the hash tree and return the rewritten byte sequence if found. """ tree_pointer = self.hash_tree for b in byte_sequence: if b in tree_pointer: tree_pointer = tree_pointer[b] else: return None return tree_pointer[self.LEAF] def rewrite_bytes(self, in_bytes: list[str], reverse=False) -> list[str]: """ Rewrite a sequence of bytes using the hash tree. Args: in_bytes (`list[str]`): A list of bytes to be rewritten. reverse (`bool`): If True, decoding is performed with the reverse hash tree. Returns: `list[str]`: The rewritten byte sequence. """ out_bytes = [] b_start = 0 b_end = 0 while b_start < len(in_bytes): tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree for j in range(b_start, len(in_bytes)): b = in_bytes[j] if b in tree_pointer: tree_pointer = tree_pointer[b] elif j == b_start: cur_leaf = [b] b_end = j break else: break if self.LEAF in tree_pointer: cur_leaf = tree_pointer[self.LEAF] b_end = j out_bytes.extend(cur_leaf) b_start = b_end + 1 return out_bytes
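A self-contained sketch of the hash-tree rewriting implemented above, using a toy one-rule map (the real decompose/merge maps ship in the MyT5 vocabulary file):

```python
from transformers.models.myt5.tokenization_myt5 import ByteRewriter

# Toy rule: the byte pair "61 62" ("ab") is rewritten to the single byte "ff".
rewriter = ByteRewriter({"61 62": "ff"})

in_bytes = [f"{b:02x}" for b in b"abc"]        # ['61', '62', '63']
out_bytes = rewriter.rewrite_bytes(in_bytes)   # greedy longest match through the hash tree
print(out_bytes)                               # ['ff', '63']

# reverse=True walks the reverse hash tree and undoes the rewrite.
print(rewriter.rewrite_bytes(out_bytes, reverse=True))  # ['61', '62', '63']
```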
class ByteRewriter: ''' Byte rewriter class for MyT5 tokenizer. This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules. Args: rewriting_rules (`str` or `dict[str, str]`): A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules. ''' def __init__(self, rewriting_rules: Union[str, dict[str, str]]): pass def add_leaf(self, hash_tree: dict[str, Union[dict, list[str]]], byte_in_sequence: str, byte_out_sequence: str): ''' Add a leaf with the output byte sequence to the hash tree. ''' pass def construct_hash_tree(self, rewriting_rules: dict[str, str]) -> dict[str, Union[dict, list[str]]]: ''' Construct a hash tree for rewritten byte sequences. ''' pass def search_hash_tree(self, byte_sequence: list[str]) -> Union[None, list[str]]: ''' Search the hash tree and return the rewritten byte sequence if found. ''' pass def rewrite_bytes(self, in_bytes: list[str], reverse=False) -> list[str]: ''' Rewrite a sequence of bytes using the hash tree. Args: in_bytes (`list[str]`): A list of bytes to be rewritten. reverse (`bool`): If True, decoding is performed with the reverse hash tree. Returns: `list[str]`: The rewritten byte sequence. ''' pass
6
5
17
2
11
3
4
0.41
0
5
0
0
5
2
5
5
100
17
59
27
53
24
53
26
47
7
0
3
19
4,141
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/myt5/tokenization_myt5.py
transformers.models.myt5.tokenization_myt5.MyT5Tokenizer
import json import warnings from ...tokenization_utils import AddedToken, PreTrainedTokenizer from typing import Optional, Union import os class MyT5Tokenizer(PreTrainedTokenizer): """ Construct a MyT5 tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): The file containing the byte rewriting rules. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (`int`, *optional*, defaults to 125): Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary like in ByT5 preprocessing see [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)). additional_special_tokens (`list[str]`, *optional*): Additional special tokens used by the tokenizer. """ model_input_names = ['input_ids', 'attention_mask'] vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=125, additional_special_tokens=None, **kwargs) -> None: if extra_ids > 0 and additional_special_tokens is None: additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)] elif extra_ids > 0 and additional_special_tokens is not None and (len(additional_special_tokens) > 0): extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x)), additional_special_tokens))) if extra_tokens != extra_ids: raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to MyT5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids tokens') pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token} self.offset = len(self._added_tokens_decoder) self._utf_vocab_size = 2 ** 8 self.byte_maps = json.load(open(vocab_file, 'r')) self.decompose_rewriter = ByteRewriter(self.byte_maps['decompose_map']) self.merge_rewriter = ByteRewriter(self.byte_maps['merge_map']) super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=0, additional_special_tokens=additional_special_tokens, **kwargs) @property def vocab_size(self): return self._utf_vocab_size def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)} vocab.update(self.added_tokens_encoder) return vocab def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [0] * len(token_ids_0) + [1] return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn(f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.') return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def _tokenize(self, text: str, **kwargs) -> list[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words. Represents tokens in two character hex format""" tokens = [f'{i:02x}' for i in text.encode('utf-8')] tokens = self.morphological_encode(tokens) return tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if len(token) != 2: token_id = None else: token_id = int(token, 16) + self.offset return token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = f'{index - self.offset:02x}' return token def morphological_encode(self, indices: list[str]) -> list[str]: indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=False) indices = self.merge_rewriter.rewrite_bytes(indices, reverse=False) return indices def morphological_decode(self, indices: list[str]) -> list[str]: indices = self.merge_rewriter.rewrite_bytes(indices, reverse=True) indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=True) return indices def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" bstring = b'' out_tokens = [] for token in tokens: if token in self.added_tokens_decoder: out_tokens.append(self.added_tokens_decoder[token]) elif token in self.added_tokens_encoder: out_tokens.append(token) else: out_tokens.append(token) out_tokens = self.morphological_decode(out_tokens) _added_tokens = set(self.added_tokens_decoder.values()) | set(self.added_tokens_encoder) for token in out_tokens: if token in _added_tokens: bstring += bytes(token, 'utf-8') else: bstring += bytes.fromhex(token) string = bstring.decode('utf-8', errors='ignore') return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False)) return (vocab_file,)
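Before the morphological rewriting step, `_tokenize` and `_convert_token_to_id` above reduce text to two-character hex bytes with a small id offset for the special tokens; a standalone sketch of that mapping:

```python
# Pure-Python illustration of the byte-level scheme (no vocab file needed).
text = "hi"

tokens = [f"{b:02x}" for b in text.encode("utf-8")]
print(tokens)  # ['68', '69']

offset = 3  # <pad>=0, </s>=1, <unk>=2 occupy the first ids
ids = [int(tok, 16) + offset for tok in tokens]
print(ids)  # [107, 108]

# Decoding inverts both steps.
print(bytes.fromhex("".join(f"{i - offset:02x}" for i in ids)).decode("utf-8"))  # 'hi'
```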
class MyT5Tokenizer(PreTrainedTokenizer): ''' Construct a MyT5 tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): The file containing the byte rewriting rules. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (`int`, *optional*, defaults to 125): Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary like in ByT5 preprocessing see [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)). additional_special_tokens (`list[str]`, *optional*): Additional special tokens used by the tokenizer. ''' def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=125, additional_special_tokens=None, **kwargs) -> None: pass @property def vocab_size(self): pass def get_vocab(self): pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]: '''Do not add eos again if user already added it.''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. 
Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def _tokenize(self, text: str, **kwargs) -> list[str]: '''Take as input a string and return a list of strings (tokens) for words/sub-words. Represents tokens in two character hex format''' pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def morphological_encode(self, indices: list[str]) -> list[str]: pass def morphological_decode(self, indices: list[str]) -> list[str]: pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
16
9
14
1
9
4
2
0.58
1
10
1
0
14
6
14
103
243
36
132
52
101
76
90
35
75
7
3
2
34
4,142
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/configuration_nemotron.py
transformers.models.nemotron.configuration_nemotron.NemotronConfig
from ...modeling_rope_utils import rope_config_validation from ...configuration_utils import PretrainedConfig class NemotronConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`NemotronModel`]. It is used to instantiate an Nemotron model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nemotron-8B. e.g. [nvidia/nemotron-3-8b-base-4k-hf](https://huggingface.co/nvidia/nemotron-3-8b-base-4k-hf). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Nemotron model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NemotronModel`] hidden_size (`int`, *optional*, defaults to 6144): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 48): Number of attention heads for each attention layer in the Transformer decoder. head_dim (`int`, *optional*): Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if None num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.0134): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): Padding token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 3): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. partial_rotary_factor (`float`, *optional*, defaults to 0.5): Percentage of the query and keys which will have rotary embedding. 
attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj and down_proj layers in the MLP layers. ```python >>> from transformers import NemotronModel, NemotronConfig >>> # Initializing a Nemotron nemotron-15b style configuration >>> configuration = NemotronConfig() >>> # Initializing a model from the nemotron-15b style configuration >>> model = NemotronModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'nemotron' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=256000, hidden_size=6144, intermediate_size=24576, num_hidden_layers=32, num_attention_heads=48, head_dim=None, num_key_value_heads=None, hidden_act='relu2', max_position_embeddings=4096, initializer_range=0.0134, norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=2, eos_token_id=3, tie_word_embeddings=False, rope_theta=10000.0, partial_rotary_factor=0.5, attention_bias=False, attention_dropout=0.0, mlp_bias=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim if head_dim is not None else hidden_size // num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.norm_eps = norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.partial_rotary_factor = partial_rotary_factor rope_config_validation(self) self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.mlp_bias = mlp_bias super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
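A brief sketch of the `head_dim` default and the grouped-query grouping described in the docstring above; the `num_key_value_heads=8` value is illustrative, not taken from a released checkpoint:

```python
from transformers import NemotronConfig

config = NemotronConfig(
    hidden_size=6144,
    num_attention_heads=48,
    num_key_value_heads=8,  # GQA: 48 query heads share 8 key/value heads
)

print(config.head_dim)  # defaults to hidden_size // num_attention_heads = 128
print(config.num_attention_heads // config.num_key_value_heads)  # 6 query heads per KV head
```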
class NemotronConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`NemotronModel`]. It is used to instantiate an Nemotron model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nemotron-8B. e.g. [nvidia/nemotron-3-8b-base-4k-hf](https://huggingface.co/nvidia/nemotron-3-8b-base-4k-hf). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Nemotron model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NemotronModel`] hidden_size (`int`, *optional*, defaults to 6144): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 48): Number of attention heads for each attention layer in the Transformer decoder. head_dim (`int`, *optional*): Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if None num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.0134): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): Padding token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 3): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. partial_rotary_factor (`float`, *optional*, defaults to 0.5): Percentage of the query and keys which will have rotary embedding. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. 
attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj and down_proj layers in the MLP layers. ```python >>> from transformers import NemotronModel, NemotronConfig >>> # Initializing a Nemotron nemotron-15b style configuration >>> configuration = NemotronConfig() >>> # Initializing a model from the nemotron-15b style configuration >>> model = NemotronModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=256000, hidden_size=6144, intermediate_size=24576, num_hidden_layers=32, num_attention_heads=48, head_dim=None, num_key_value_heads=None, hidden_act='relu2', max_position_embeddings=4096, initializer_range=0.0134, norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=2, eos_token_id=3, tie_word_embeddings=False, rope_theta=10000.0, partial_rotary_factor=0.5, attention_bias=False, attention_dropout=0.0, mlp_bias=False, **kwargs): pass
2
1
51
1
50
0
2
1.25
1
1
0
0
1
17
1
1
128
9
53
45
27
66
23
21
21
2
1
0
2
4,143
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronAttention
import torch.nn.functional as F import torch import math from ...utils.deprecation import deprecate_kwarg from typing import Optional, Union from torch import Size, Tensor, nn from ...cache_utils import Cache, DynamicCache, StaticCache from .configuration_nemotron import NemotronConfig class NemotronAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: NemotronConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.partial_rotary_factor = config.partial_rotary_factor self.is_causal = True self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=config.attention_bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, 
training=self.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
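`repeat_kv` is called above but not included in this record; a minimal sketch of the usual formulation (each key/value head repeated `num_key_value_groups` times so the KV tensors line up with the query heads), offered as an assumption rather than the exact library code:

```python
import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq_len, head_dim) -> (batch, num_kv_heads * n_rep, seq_len, head_dim)
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)


# 48 query heads over 8 KV heads -> each KV head serves 48 // 8 = 6 query heads.
key_states = torch.randn(1, 8, 16, 128)
print(repeat_kv(key_states, 6).shape)  # torch.Size([1, 48, 16, 128])
```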
class NemotronAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: NemotronConfig, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
4
1
40
8
32
2
4
0.06
1
6
2
2
2
16
2
12
84
17
64
38
51
4
50
28
47
5
1
1
7
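A minimal, self-contained sketch of the eager attention path in the `NemotronAttention.forward` above: key/value heads are expanded with a `repeat_kv`-style helper, scores are scaled by `1/sqrt(head_dim)`, an additive causal mask is added, and the softmax runs in float32. The toy shapes and the helper below are illustrative stand-ins, not the library's own functions.

```python
import math
import torch

def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)
    b, kv, s, d = x.shape
    if n_rep == 1:
        return x
    return x[:, :, None, :, :].expand(b, kv, n_rep, s, d).reshape(b, kv * n_rep, s, d)

bsz, q_len, num_heads, num_kv_heads, head_dim = 2, 5, 8, 2, 16
q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn(bsz, num_kv_heads, q_len, head_dim)
v = torch.randn(bsz, num_kv_heads, q_len, head_dim)

k = repeat_kv(k, num_heads // num_kv_heads)
v = repeat_kv(v, num_heads // num_kv_heads)

# additive causal mask: 0 where attention is allowed, a large negative value on future positions
mask = torch.full((q_len, q_len), torch.finfo(q.dtype).min).triu(1)[None, None]

attn = (q @ k.transpose(2, 3)) / math.sqrt(head_dim) + mask
attn = torch.softmax(attn, dim=-1, dtype=torch.float32).to(q.dtype)
out = (attn @ v).transpose(1, 2).reshape(bsz, q_len, -1)
print(out.shape)  # torch.Size([2, 5, 128])
```

With `num_key_value_heads < num_attention_heads`, the key/value projections (and the KV cache) shrink by the grouping factor while the query path keeps its full head count.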
4,144
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronDecoderLayer
from typing import Optional, Union from ...utils.deprecation import deprecate_kwarg from ...cache_utils import Cache, DynamicCache, StaticCache import torch import torch.nn.functional as F from .configuration_nemotron import NemotronConfig from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer class NemotronDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: NemotronConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = NEMOTRON_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = NemotronMLP(config) self.input_layernorm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps) self.post_attention_layernorm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class NemotronDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: NemotronConfig, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model ''' pass
4
1
38
5
22
13
2
0.59
1
8
4
0
2
5
2
12
79
10
44
22
30
26
23
11
20
3
1
1
4
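A small sketch of the pre-norm residual ordering used by `NemotronDecoderLayer.forward` above (normalize, apply the sub-layer, add the residual; twice per layer). The `nn.Linear` placeholders stand in for the real attention and MLP blocks and are purely illustrative.

```python
import torch
from torch import nn

class ToyPreNormBlock(nn.Module):
    # illustrative stand-in: norm -> sublayer -> residual add, applied twice per layer
    def __init__(self, hidden_size: int):
        super().__init__()
        self.input_layernorm = nn.LayerNorm(hidden_size)
        self.post_attention_layernorm = nn.LayerNorm(hidden_size)
        self.self_attn = nn.Linear(hidden_size, hidden_size)  # placeholder for attention
        self.mlp = nn.Linear(hidden_size, hidden_size)        # placeholder for the MLP

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states
        hidden_states = residual + self.self_attn(self.input_layernorm(hidden_states))
        residual = hidden_states
        hidden_states = residual + self.mlp(self.post_attention_layernorm(hidden_states))
        return hidden_states

x = torch.randn(2, 7, 32)
print(ToyPreNormBlock(32)(x).shape)  # torch.Size([2, 7, 32])
```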
4,145
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronFlashAttention2
from ...cache_utils import Cache, DynamicCache, StaticCache from typing import Optional, Union from ...utils.deprecation import deprecate_kwarg import torch.nn.functional as F import torch from ...modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask class NemotronFlashAttention2(NemotronAttention): """ Nemotron flash attention module. This module inherits from `NemotronAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if isinstance(past_key_values, StaticCache): raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers') output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu' if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. 
We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
class NemotronFlashAttention2(NemotronAttention): ''' Nemotron flash attention module. This module inherits from `NemotronAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. ''' def __init__(self, *args, **kwargs): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
4
1
51
9
35
8
5
0.3
1
6
2
0
2
1
2
14
111
19
71
25
58
21
41
15
38
9
2
2
10
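One detail worth isolating from `NemotronFlashAttention2.forward` above is the dtype cast-back: flash-attention kernels expect fp16/bf16 inputs, so activations that were silently upcast to float32 (e.g. by an upcasted layer norm) are cast to a target dtype before the kernel call. The sketch below condenses that check; the `torch.bfloat16` default is an illustrative assumption, not the module's actual resolution order (which consults autocast, quantization config, and the projection weights).

```python
import torch

def cast_back_if_needed(q, k, v, target_dtype=torch.bfloat16):
    # condensed illustration: flash attention cannot run in float32, so cast back if needed
    if q.dtype == torch.float32:
        q, k, v = (t.to(target_dtype) for t in (q, k, v))
    return q, k, v

q = k = v = torch.randn(1, 8, 4, 16)  # float32, e.g. after an upcasted layer norm
q, k, v = cast_back_if_needed(q, k, v)
print(q.dtype)  # torch.bfloat16
```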
4,146
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronForCausalLM
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache import torch.nn.functional as F from typing import Optional, Union import torch from torch import Size, Tensor, nn from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast class NemotronForCausalLM(NemotronPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = NemotronModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> CausalLMOutputWithPast: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, NemotronForCausalLM >>> model = NemotronForCausalLM.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position) hidden_states = outputs.last_hidden_state slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class NemotronForCausalLM(NemotronPreTrainedModel, GenerationMixin): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> CausalLMOutputWithPast: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, NemotronForCausalLM >>> model = NemotronForCausalLM.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```''' pass
5
1
14
2
8
4
2
0.4
2
8
3
0
8
3
8
9
122
21
72
35
45
29
35
19
26
8
2
1
15
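The `logits_to_keep` handling in `NemotronForCausalLM.forward` above turns an `int` into `slice(-n, None)`, so `0` keeps every position (useful for training) while `1` computes logits only for the final position (the common generation case); a tensor is treated as explicit indices. A toy check of that slicing, with made-up shapes:

```python
import torch

def keep_logits(hidden_states: torch.Tensor, logits_to_keep):
    # mirrors the slicing in the forward pass: int -> keep only the last N positions
    idx = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    return hidden_states[:, idx, :]

h = torch.randn(2, 10, 8)
print(keep_logits(h, 0).shape)                      # torch.Size([2, 10, 8]) -- 0 keeps every position
print(keep_logits(h, 1).shape)                      # torch.Size([2, 1, 8])  -- only the final position
print(keep_logits(h, torch.tensor([3, 7])).shape)   # torch.Size([2, 2, 8])  -- explicit indices
```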
4,147
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronForQuestionAnswering
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer class NemotronForQuestionAnswering(GenericForQuestionAnswering, NemotronPreTrainedModel): base_model_prefix = 'transformer'
class NemotronForQuestionAnswering(GenericForQuestionAnswering, NemotronPreTrainedModel): pass
1
0
18
2
13
3
2
0.22
1
5
3
0
4
2
4
5
78
11
55
28
36
12
26
14
21
5
2
1
8
4,148
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronForSequenceClassification
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer class NemotronForSequenceClassification(GenericForSequenceClassification, NemotronPreTrainedModel): ...
class NemotronForSequenceClassification(GenericForSequenceClassification, NemotronPreTrainedModel): pass
1
0
21
2
17
2
3
0.11
1
7
3
0
4
3
4
5
90
11
71
31
53
8
36
18
31
9
2
1
12
4,149
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronForTokenClassification
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer class NemotronForTokenClassification(GenericForTokenClassification, NemotronPreTrainedModel): ...
class NemotronForTokenClassification(GenericForTokenClassification, NemotronPreTrainedModel): pass
1
0
17
1
14
2
3
0.11
1
5
2
0
4
4
4
5
79
8
64
28
41
7
29
15
24
5
2
1
10
4,150
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronLayerNorm1P
import torch.nn.functional as F import torch from typing import Optional, Union from torch import Size, Tensor, nn class NemotronLayerNorm1P(nn.LayerNorm): def __init__(self, normalized_shape: Union[int, list[int], Size], eps: float=1e-05, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None): super().__init__(normalized_shape, eps, elementwise_affine, bias, device, dtype) def forward(self, input: Tensor) -> Tensor: device_type = input.device.type if input.device.type != 'mps' else 'cpu' args = _cast_if_autocast_enabled(device_type, input, self.normalized_shape, self.weight + 1, self.bias, self.eps) with torch.autocast(device_type=input.device.type, enabled=False): return F.layer_norm(*args)
class NemotronLayerNorm1P(nn.LayerNorm): def __init__(self, normalized_shape: Union[int, list[int], Size], eps: float=1e-05, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None): pass def forward(self, input: Tensor) -> Tensor: pass
3
0
7
0
7
0
1
0
1
6
0
0
2
0
2
2
16
1
15
12
4
0
7
4
4
1
1
1
2
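The defining detail of `NemotronLayerNorm1P` above is that the forward pass applies `self.weight + 1`, so a stored weight of zero behaves like a unit-gain layer norm. The snippet below only illustrates that forward computation; it says nothing about how the real module's parameters are initialized.

```python
import torch
import torch.nn.functional as F

hidden = 16
x = torch.randn(4, hidden)

weight = torch.zeros(hidden)  # stored weight; the forward adds 1 before normalizing
bias = torch.zeros(hidden)

out_1p = F.layer_norm(x, (hidden,), weight + 1, bias, eps=1e-5)
out_ref = F.layer_norm(x, (hidden,), torch.ones(hidden), bias, eps=1e-5)
print(torch.allclose(out_1p, out_ref))  # True
```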
4,151
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronMLP
from ...activations import ACT2FN from torch import Size, Tensor, nn class NemotronMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.up_proj(x)))
class NemotronMLP(nn.Module): def __init__(self, config): pass def forward(self, x): pass
3
0
5
0
5
0
1
0
1
1
0
0
2
6
2
12
12
1
11
9
8
0
11
9
8
1
1
0
2
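`NemotronMLP` above is a plain two-projection feed-forward block (a single `up_proj`, an activation taken from `config.hidden_act`, then `down_proj`) with no gate branch. A minimal stand-in, using `ReLU` purely as a placeholder activation:

```python
import torch
from torch import nn

hidden_size, intermediate_size = 8, 32
up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
act_fn = nn.ReLU()  # placeholder; the real activation comes from config.hidden_act

x = torch.randn(2, 5, hidden_size)
out = down_proj(act_fn(up_proj(x)))  # single up projection, no gate branch
print(out.shape)  # torch.Size([2, 5, 8])
```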
4,152
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronModel
from typing import Optional, Union from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast import torch from .configuration_nemotron import NemotronConfig from ...modeling_attn_mask_utils import AttentionMaskConverter from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...cache_utils import Cache, DynamicCache, StaticCache from torch import Size, Tensor, nn import torch.nn.functional as F @auto_docstring class NemotronModel(NemotronPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`NemotronDecoderLayer`] Args: config: NemotronConfig """ def __init__(self, config: NemotronConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([NemotronDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps) self.rotary_emb = NemotronRotaryEmbedding(config=config) self.gradient_checkpointing = False self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You must specify exactly one of input_ids or inputs_embeds') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.') use_cache = False if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def 
_prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
@auto_docstring class NemotronModel(NemotronPreTrainedModel): ''' Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`NemotronDecoderLayer`] Args: config: NemotronConfig ''' def __init__(self, config: NemotronConfig): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> BaseModelOutputWithPast: pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
9
2
42
5
31
6
7
0.23
1
16
9
0
5
7
6
7
267
36
189
65
152
43
96
35
89
24
2
2
40
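The static `_prepare_4d_causal_attention_mask_with_cache_position` above builds a `(batch, 1, query_len, key_len)` additive mask that combines the causal constraint (via `cache_position`) with 2D padding information. The condensed re-implementation below mirrors those operations on toy inputs; the shapes and token counts are invented for illustration.

```python
import torch

def build_causal_mask(attention_mask, seq_len, target_len, dtype, cache_position, batch_size):
    # condensed version of the mask-building logic above, for illustration only
    min_dtype = torch.finfo(dtype).min
    mask = torch.full((seq_len, target_len), min_dtype, dtype=dtype)
    mask = torch.triu(mask, diagonal=1)
    mask *= torch.arange(target_len) > cache_position.reshape(-1, 1)   # mask absolute future positions
    mask = mask[None, None].expand(batch_size, 1, -1, -1).clone()
    if attention_mask is not None:
        m = attention_mask.shape[-1]
        pad = mask[:, :, :, :m] + attention_mask[:, None, None, :].to(dtype)
        mask[:, :, :, :m] = mask[:, :, :, :m].masked_fill(pad == 0, min_dtype)
    return mask

# decoding 2 new tokens with 3 tokens already in the cache (key length 5)
attn = torch.tensor([[1, 1, 1, 1, 1], [0, 1, 1, 1, 1]])  # second sequence has one padding token
mask = build_causal_mask(attn, seq_len=2, target_len=5, dtype=torch.float32,
                         cache_position=torch.tensor([3, 4]), batch_size=2)
print(mask.shape)  # torch.Size([2, 1, 2, 5])
```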
4,153
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronPreTrainedModel
from ...modeling_utils import PreTrainedModel from .configuration_nemotron import NemotronConfig from torch import Size, Tensor, nn from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging @auto_docstring class NemotronPreTrainedModel(PreTrainedModel): config: NemotronConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['NemotronDecoderLayer'] _skip_keys_device_placement = ['past_key_values'] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, NemotronLayerNorm1P): module.weight.data.fill_(1.0) module.bias.data.zero_()
@auto_docstring class NemotronPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
10
0
10
0
5
0
1
0
0
5
1
0
1
1
22
1
21
13
19
0
20
13
18
5
1
2
5
4,154
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronRotaryEmbedding
import torch from torch import Size, Tensor, nn import torch.nn.functional as F from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from .configuration_nemotron import NemotronConfig class NemotronRotaryEmbedding(nn.Module): inv_freq: torch.Tensor def __init__(self, config: NemotronConfig, device=None): super().__init__() self.rope_type = 'default' self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer('inv_freq', inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
class NemotronRotaryEmbedding(nn.Module): def __init__(self, config: NemotronConfig, device=None): pass @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): pass
5
0
19
2
13
4
2
0.34
1
4
1
0
3
7
3
13
61
9
41
25
32
14
36
20
32
3
1
1
7
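A compact sketch of what `NemotronRotaryEmbedding.forward` above computes: per-position angles from `inv_freq`, duplicated into a full `head_dim` vector, and returned as `cos`/`sin` tables that the attention layers consume through a rotate-half application. The `rotate_half` step below is the standard formulation and is shown only for context; the attention-scaling factor and the partial-rotary option are ignored here.

```python
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

head_dim, base = 16, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim / 2,)

position_ids = torch.arange(6)[None, :]                              # (batch=1, seq=6)
freqs = position_ids[:, :, None].float() * inv_freq[None, None, :]   # (1, 6, head_dim / 2)
emb = torch.cat((freqs, freqs), dim=-1)                              # (1, 6, head_dim)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(1, 2, 6, head_dim)                                   # (batch, heads, seq, head_dim)
q_rot = q * cos[:, None] + rotate_half(q) * sin[:, None]             # broadcast cos/sin over heads
print(q_rot.shape)  # torch.Size([1, 2, 6, 16])
```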
4,155
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nemotron/modeling_nemotron.py
transformers.models.nemotron.modeling_nemotron.NemotronSdpaAttention
import torch.nn.functional as F import torch from ...cache_utils import Cache, DynamicCache, StaticCache from ...utils.deprecation import deprecate_kwarg from typing import Optional, Union class NemotronSdpaAttention(NemotronAttention): """ Nemotron attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `NemotronAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('NemotronModel is using NemotronSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = causal_mask is None and q_len > 1 attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return (attn_output, None)
class NemotronSdpaAttention(NemotronAttention): ''' Nemotron attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `NemotronAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. ''' @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
3
1
81
13
62
6
8
0.19
1
4
1
0
1
0
1
13
89
14
63
22
50
12
33
11
31
8
2
1
8
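`NemotronSdpaAttention` above routes the same projections through `torch.nn.functional.scaled_dot_product_attention`. The toy check below compares SDPA's built-in causal masking against the manual masked-softmax path; agreement is expected up to backend numerics.

```python
import math
import torch
import torch.nn.functional as F

q = torch.randn(2, 4, 5, 16)
k = torch.randn(2, 4, 5, 16)
v = torch.randn(2, 4, 5, 16)

# SDPA with is_causal=True vs. the manual masked-softmax path
sdpa_out = F.scaled_dot_product_attention(q, k, v, is_causal=True)

mask = torch.full((5, 5), float("-inf")).triu(1)
manual = torch.softmax(q @ k.transpose(-2, -1) / math.sqrt(16) + mask, dim=-1) @ v
print(torch.allclose(sdpa_out, manual, atol=1e-5))  # True (up to backend numerics)
```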
4,156
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb/tokenization_nllb.py
transformers.models.nllb.tokenization_nllb.NllbTokenizer
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from shutil import copyfile import os import sentencepiece as spm from typing import Any, Optional from ...utils.import_utils import requires @requires(backends=('sentencepiece',)) class NllbTokenizer(PreTrainedTokenizer): """ Construct an NLLB tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import NllbTokenizer >>> tokenizer = NllbTokenizer.from_pretrained( ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. src_lang (`str`, *optional*): The language to use as source language for translation. tgt_lang (`str`, *optional*): The language to use as target language for translation. sp_model_kwargs (`dict[str, str]`): Additional keyword arguments to pass to the model initialization. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] prefix_tokens: list[int] = [] suffix_tokens: list[int] = [] def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[dict[str, Any]]=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs): if additional_special_tokens is None: additional_special_tokens = FAIRSEQ_LANGUAGE_CODES bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token mask_token = AddedToken(mask_token, normalized=True, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.legacy_behaviour = legacy_behaviour self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token} self.fairseq_offset = 1 self.sp_model_size = len(self.sp_model) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs) self._src_lang = src_lang if src_lang is not None else 'eng_Latn' self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None state['sp_model_proto'] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + [0] * len(token_ids_0) + suffix_ones return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs['forced_bos_token_id'] = tgt_lang_id return inputs def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> list[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" spm_id = self.sp_model.PieceToId(token) return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng_Latn', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra_Latn', **kwargs) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. - In legacy mode: No prefix and suffix=[eos, src_lang_code]. - In default mode: Prefix=[src_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(src_lang) if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target lang setting. - In legacy mode: No prefix and suffix=[eos, tgt_lang_code]. 
- In default mode: Prefix=[tgt_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(lang) if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id]
@requires(backends=('sentencepiece',)) class NllbTokenizer(PreTrainedTokenizer): ''' Construct an NLLB tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import NllbTokenizer >>> tokenizer = NllbTokenizer.from_pretrained( ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. src_lang (`str`, *optional*): The language to use as source language for translation. tgt_lang (`str`, *optional*): The language to use as target language for translation. sp_model_kwargs (`dict[str, str]`): Additional keyword arguments to pass to the model initialization. 
''' def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[dict[str, Any]]=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs): pass def __getstate__(self): pass def __setstate__(self, d): pass @property def vocab_size(self): pass @property def src_lang(self) -> str: pass @src_lang.setter def src_lang(self) -> str: pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
''' pass def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): '''Used by translation pipeline, to prepare inputs for the generate function''' pass def get_vocab(self): pass def _tokenize(self, text: str) -> list[str]: pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (strings for sub-words) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng_Latn', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra_Latn', **kwargs) -> BatchEncoding: pass def _switch_to_input_mode(self): pass def _switch_to_target_mode(self): pass def set_src_lang_special_tokens(self, src_lang) -> None: '''Reset the special tokens to the source lang setting. - In legacy mode: No prefix and suffix=[eos, src_lang_code]. - In default mode: Prefix=[src_lang_code], suffix = [eos] ''' pass def set_tgt_lang_special_tokens(self, lang: str) -> None: '''Reset the special tokens to the target lang setting. - In legacy mode: No prefix and suffix=[eos, tgt_lang_code]. - In default mode: Prefix=[tgt_lang_code], suffix = [eos] ''' pass
26
10
12
1
8
3
2
0.66
1
8
1
0
21
12
21
110
354
59
178
86
121
117
117
50
95
9
3
2
42
4157
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb/tokenization_nllb_fast.py
transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast
from ...tokenization_utils_fast import PreTrainedTokenizerFast from tokenizers import processors from shutil import copyfile import os from typing import Optional from ...tokenization_utils import AddedToken, BatchEncoding class NllbTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import NllbTokenizerFast >>> tokenizer = NllbTokenizerFast.from_pretrained( ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. src_lang (`str`, *optional*): The language to use as source language for translation. tgt_lang (`str`, *optional*): The language to use as target language for translation. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = NllbTokenizer prefix_tokens: list[int] = [] suffix_tokens: list[int] = [] def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs): if additional_special_tokens is None: additional_special_tokens = FAIRSEQ_LANGUAGE_CODES self.vocab_file = vocab_file mask_token = AddedToken(mask_token, normalized=True, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token self.legacy_behaviour = legacy_behaviour super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, src_lang=src_lang, tgt_lang=tgt_lang, mask_token=mask_token, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs) self._src_lang = src_lang if src_lang is not None else 'eng_Latn' self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs['forced_bos_token_id'] = tgt_lang_id return inputs def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng_Latn', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra_Latn', **kwargs) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. - In legacy mode: No prefix and suffix=[eos, src_lang_code]. - In default mode: Prefix=[src_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(src_lang) if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing(single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens))) def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target lang setting. - In legacy mode: No prefix and suffix=[eos, tgt_lang_code]. 
- In default mode: Prefix=[tgt_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(lang) if self.legacy_behaviour: self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing(single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens))) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
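Note: an illustrative usage sketch for the class above, not part of the record. It assumes the facebook/nllb-200-distilled-600M checkpoint named in the docstring is available, and shows that assigning to `src_lang` re-runs `set_src_lang_special_tokens`, so the language code wrapped around the encoder input changes accordingly.

```python
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
)
ids = tok("UN Chief says there is no military solution in Syria").input_ids
tokens = tok.convert_ids_to_tokens(ids)
# In default (non-legacy) mode the prefix is the language code and the suffix is eos:
print(tokens[0], tokens[-1])  # eng_Latn </s>

tok.src_lang = "fra_Latn"  # property setter -> set_src_lang_special_tokens("fra_Latn")
ids = tok("Bonjour le monde").input_ids
print(tok.convert_ids_to_tokens(ids)[0])  # fra_Latn
```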
class NllbTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import NllbTokenizerFast >>> tokenizer = NllbTokenizerFast.from_pretrained( ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. src_lang (`str`, *optional*): The language to use as source language for translation. tgt_lang (`str`, *optional*): The language to use as target language for translation. 
''' def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs): pass @property def src_lang(self) -> str: pass @src_lang.setter def src_lang(self) -> str: pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): '''Used by translation pipeline, to prepare inputs for the generate function''' pass def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='eng_Latn', tgt_texts: Optional[list[str]]=None, tgt_lang: str='fra_Latn', **kwargs) -> BatchEncoding: pass def _switch_to_input_mode(self): pass def _switch_to_target_mode(self): pass def set_src_lang_special_tokens(self, src_lang) -> None: '''Reset the special tokens to the source lang setting. - In legacy mode: No prefix and suffix=[eos, src_lang_code]. - In default mode: Prefix=[src_lang_code], suffix = [eos] ''' pass def set_tgt_lang_special_tokens(self, lang: str) -> None: '''Reset the special tokens to the target lang setting. - In legacy mode: No prefix and suffix=[eos, tgt_lang_code]. - In default mode: Prefix=[tgt_lang_code], suffix = [eos] ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
15
6
15
2
11
3
2
0.63
1
8
1
0
13
5
13
101
287
49
146
65
100
92
80
33
66
5
3
1
26
4158
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/configuration_nllb_moe.py
transformers.models.nllb_moe.configuration_nllb_moe.NllbMoeConfig
from ...configuration_utils import PretrainedConfig class NllbMoeConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the NLLB-MoE [facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NllbMoeModel`] or d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. second_expert_policy ( `str`, *optional*, default to `"all"`): The policy used for the sampling the probability of being sampled to a second expert for each token. normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `True`): Whether or not to normalize the router probabilities before applying a mask based on the experts capacity (capacity dropping). 
batch_prioritized_routing (`bool`, *optional*, defaults to `True`): Whether or not to orders the tokens by their router probabilities before capacity dropping. This means that the tokens that have the highest probabilities will be routed before other tokens that might be further in the sequence. moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0): Fraction of tokens as capacity during validation, if set to negative, uses the same as training. Should be in range: (0.0, 1.0]. num_experts (`int`, *optional*, defaults to 128): Number of experts for each NllbMoeSparseMlp layer. expert_capacity (`int`, *optional*, defaults to 64): Number of tokens that can be stored in each expert. encoder_sparse_step (`int`, *optional*, defaults to 4): Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse. decoder_sparse_step (`int`, *optional*, defaults to 4): Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. if `False`, the padding tokens are not routed to any experts. router_bias (`bool`, *optional*, defaults to `False`): Whether or not the classifier of the router should have a bias. moe_token_dropout (`float`, *optional*, default to 0.2): Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert outputs. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Example: ```python >>> from transformers import NllbMoeModel, NllbMoeConfig >>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration >>> configuration = NllbMoeConfig() >>> # Initializing a model from the facebook/nllb-moe-54b style configuration >>> model = NllbMoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'nllb-moe' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype='float32', router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy='all', normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding self.router_z_loss_coef = router_z_loss_coef self.router_aux_loss_coef = router_aux_loss_coef self.decoder_sparse_step = decoder_sparse_step self.encoder_sparse_step = encoder_sparse_step self.num_experts = num_experts self.expert_capacity = expert_capacity self.router_bias = router_bias if router_dtype not in ['float32', 'float16', 'bfloat16']: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}") self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.batch_prioritized_routing = batch_prioritized_routing self.second_expert_policy = second_expert_policy self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction self.moe_token_dropout = moe_token_dropout self.output_router_logits = output_router_logits super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
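Note: an illustrative sketch, not part of the record, extending the docstring example for the configuration class above; it also exercises the `router_dtype` validation performed in `__init__`.

```python
from transformers import NllbMoeConfig

# Defaults taken from the __init__ signature above.
config = NllbMoeConfig(encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128)
print(config.d_model, config.num_experts)  # 1024 128

# Only 'float32', 'float16' and 'bfloat16' are accepted for the router dtype.
try:
    NllbMoeConfig(router_dtype="int8")
except ValueError as err:
    print(err)
```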
class NllbMoeConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the NLLB-MoE [facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NllbMoeModel`] or d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. second_expert_policy ( `str`, *optional*, default to `"all"`): The policy used for the sampling the probability of being sampled to a second expert for each token. normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `True`): Whether or not to normalize the router probabilities before applying a mask based on the experts capacity (capacity dropping). 
batch_prioritized_routing (`bool`, *optional*, defaults to `True`): Whether or not to orders the tokens by their router probabilities before capacity dropping. This means that the tokens that have the highest probabilities will be routed before other tokens that might be further in the sequence. moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0): Fraction of tokens as capacity during validation, if set to negative, uses the same as training. Should be in range: (0.0, 1.0]. num_experts (`int`, *optional*, defaults to 128): Number of experts for each NllbMoeSparseMlp layer. expert_capacity (`int`, *optional*, defaults to 64): Number of tokens that can be stored in each expert. encoder_sparse_step (`int`, *optional*, defaults to 4): Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse. decoder_sparse_step (`int`, *optional*, defaults to 4): Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. if `False`, the padding tokens are not routed to any experts. router_bias (`bool`, *optional*, defaults to `False`): Whether or not the classifier of the router should have a bias. moe_token_dropout (`float`, *optional*, default to 0.2): Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert outputs. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Example: ```python >>> from transformers import NllbMoeModel, NllbMoeConfig >>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration >>> configuration = NllbMoeConfig() >>> # Initializing a model from the facebook/nllb-moe-54b style configuration >>> model = NllbMoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype='float32', router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy='all', normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs): pass
2
1
87
1
86
1
2
1.03
1
2
0
0
1
34
1
1
193
11
90
80
47
93
42
39
40
2
1
1
2
4159
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeAttention
from ...processing_utils import Unpack import torch.nn as nn from ...modeling_flash_attention_utils import FlashAttentionKwargs from .configuration_nllb_moe import NllbMoeConfig import torch from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils.deprecation import deprecate_kwarg from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union class NllbMoeAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_causal: Optional[bool]=False, config: Optional[NllbMoeConfig]=None, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = encoder_hidden_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = encoder_hidden_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = encoder_hidden_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = 
eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
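Note: an illustrative, self-contained sketch of the head-splitting and scaling arithmetic used in NllbMoeAttention.forward. It uses plain torch rather than the module itself, and the tensor sizes are made up for the example.

```python
import torch
import torch.nn as nn

embed_dim, num_heads, bsz, tgt_len = 1024, 16, 2, 5
head_dim = embed_dim // num_heads   # __init__ raises if this does not divide evenly
scaling = head_dim ** -0.5          # 64 ** -0.5 == 0.125

q_proj = nn.Linear(embed_dim, embed_dim)
hidden_states = torch.randn(bsz, tgt_len, embed_dim)
# view to (bsz, seq, num_heads, head_dim) then move heads to dim 1, as in forward()
query_states = q_proj(hidden_states).view(bsz, tgt_len, -1, head_dim).transpose(1, 2)
print(query_states.shape)  # torch.Size([2, 16, 5, 64])
print(scaling)             # 0.125
```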
class NllbMoeAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_causal: Optional[bool]=False, config: Optional[NllbMoeConfig]=None, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
4
2
50
7
35
8
5
0.24
1
7
1
0
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
4160
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDecoder
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from typing import Callable, Optional, Union from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput from ...integrations.fsdp import is_fsdp_managed_module import math from ...integrations.deepspeed import is_deepspeed_zero3_enabled import torch.nn as nn from .configuration_nllb_moe import NllbMoeConfig import torch class NllbMoeDecoder(NllbMoePreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`NllbMoeDecoderLayer`] Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = NllbMoeScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(config.max_position_embeddings, config.d_model, self.padding_idx) sparse_step = config.decoder_sparse_step self.layers = nn.ModuleList() for i in range(config.decoder_layers): is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False self.layers.append(NllbMoeDecoderLayer(config, is_sparse, layer_idx=i)) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=True): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 attention_mask = self._update_causal_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds) positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_probs = () if output_router_logits else None all_cross_attentions = () if output_attentions else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.layerdrop if not skip_the_layer or synced_gpus: layer_head_mask = head_mask[idx] if head_mask is not None else None cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, 
output_attentions=output_attentions, output_router_logits=output_router_logits, cache_position=cache_position) hidden_states = layer_outputs[0] if skip_the_layer: continue if output_attentions: all_self_attns += (layer_outputs[1],) all_cross_attentions += (layer_outputs[2],) if output_router_logits: all_router_probs += (layer_outputs[-1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions, all_router_probs] if v is not None)) return MoEModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, router_probs=all_router_probs) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int): if self.config._attn_implementation == 'flash_attention_2': attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) elif attention_mask is None: attention_mask = make_flex_block_causal_mask(torch.ones(size=input_shape, device=inputs_embeds.device)) else: attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) return attention_mask def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): if encoder_hidden_states is not None and encoder_attention_mask is not None: if self.config._attn_implementation == 'flash_attention_2': encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == 'sdpa': encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) elif self.config._attn_implementation == 'flex_attention': if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False) else: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) return encoder_attention_mask
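Note: a tiny illustrative sketch, not part of the record, of the sparse-layer placement rule `is_sparse = (i + 1) % sparse_step == 0` from NllbMoeDecoder.__init__, using the default decoder_layers=12 and decoder_sparse_step=4 from the configuration above.

```python
decoder_layers, sparse_step = 12, 4
sparse_layers = [
    i for i in range(decoder_layers)
    if sparse_step > 0 and (i + 1) % sparse_step == 0
]
print(sparse_layers)  # [3, 7, 11] -> the 4th, 8th and 12th layers use NllbMoeSparseMLP
```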
class NllbMoeDecoder(NllbMoePreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`NllbMoeDecoderLayer`] Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=True): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int): pass def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass
5
2
137
21
82
35
21
0.48
1
13
5
0
2
9
2
3
286
45
164
50
146
78
86
35
83
36
2
4
41
4161
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDecoderLayer
from ...modeling_layers import GradientCheckpointingLayer import torch.nn as nn from .configuration_nllb_moe import NllbMoeConfig import torch from ...utils.deprecation import deprecate_kwarg from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...activations import ACT2FN class NllbMoeDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: NllbMoeConfig, is_sparse: bool=False, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.d_model self.is_sparse = is_sparse self.self_attn = NllbMoeAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.cross_attention = NllbMoeAttention(self.embed_dim, config.decoder_attention_heads, config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx) self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim) if not self.is_sparse: self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim) else: self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim) self.ff_layer_norm = nn.LayerNorm(config.d_model) self.ff_dropout = nn.Dropout(config.activation_dropout) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=True) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.cross_attention_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.cross_attention(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ff_layer_norm(hidden_states) if self.is_sparse: hidden_states, router_states = self.ffn(hidden_states, attention_mask) else: hidden_states, router_states = (self.ffn(hidden_states), None) hidden_states = self.ff_dropout(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if output_router_logits: outputs += (router_states,) return outputs
class NllbMoeDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: NllbMoeConfig, is_sparse: bool=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=True) -> torch.Tensor: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
4
1
64
7
42
15
5
0.36
1
7
4
0
2
12
2
12
129
15
84
36
69
30
50
24
47
8
1
1
10
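A small standalone sketch of the float16 overflow guard that closes `NllbMoeDecoderLayer.forward` above: if any activation overflowed to inf in half precision, clamp to a finite range just below the fp16 maximum. The helper name is ours, not part of the model.

```python
import torch

def clamp_if_fp16_overflow(hidden_states: torch.Tensor) -> torch.Tensor:
    # mirrors the guard at the end of the decoder layer
    if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
        clamp_value = torch.finfo(hidden_states.dtype).max - 1000
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    return hidden_states

x = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
print(clamp_if_fp16_overflow(x))  # all values are finite after clamping
```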
4,162
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDenseActDense
from ...activations import ACT2FN from .configuration_nllb_moe import NllbMoeConfig import torch import torch.nn as nn class NllbMoeDenseActDense(nn.Module): def __init__(self, config: NllbMoeConfig, ffn_dim: int): super().__init__() self.fc1 = nn.Linear(config.d_model, ffn_dim) self.fc2 = nn.Linear(ffn_dim, config.d_model) self.dropout = nn.Dropout(config.activation_dropout) self.act = ACT2FN[config.activation_function] def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if isinstance(self.fc2.weight, torch.Tensor) and hidden_states.dtype != self.fc2.weight.dtype and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8): hidden_states = hidden_states.to(self.fc2.weight.dtype) hidden_states = self.fc2(hidden_states) return hidden_states
class NllbMoeDenseActDense(nn.Module): def __init__(self, config: NllbMoeConfig, ffn_dim: int): pass def forward(self, hidden_states): pass
3
0
9
0
9
0
2
0
1
4
1
0
2
4
2
12
20
1
19
7
16
0
15
7
12
2
1
1
3
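A toy, self-contained version of the fc1 -> activation -> dropout -> fc2 pattern in the `NllbMoeDenseActDense` record above; the dimensions and the ReLU activation are illustrative choices, while the real module takes both from `NllbMoeConfig`.

```python
import torch
import torch.nn as nn

class TinyDenseActDense(nn.Module):
    def __init__(self, d_model: int = 8, ffn_dim: int = 32, p: float = 0.1):
        super().__init__()
        self.fc1 = nn.Linear(d_model, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, d_model)
        self.dropout = nn.Dropout(p)
        self.act = nn.ReLU()  # placeholder; NLLB-MoE reads the activation from its config

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.dropout(self.act(self.fc1(hidden_states))))

print(TinyDenseActDense()(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 5, 8])
```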
4,163
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeEncoder
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from typing import Callable, Optional, Union from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput import math import torch.nn as nn from .configuration_nllb_moe import NllbMoeConfig import torch class NllbMoeEncoder(NllbMoePreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`NllbMoeEncoderLayer`]. Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = NllbMoeScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(config.max_position_embeddings, embed_dim, self.padding_idx) sparse_step = config.encoder_sparse_step self.layers = nn.ModuleList() for i in range(config.encoder_layers): is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False self.layers.append(NllbMoeEncoderLayer(config, is_sparse)) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_ids, inputs_embeds) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = self._update_full_mask(attention_mask, inputs_embeds) encoder_states = () if output_hidden_states else None all_router_probs = () if output_router_logits else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) dropout_probability = torch.rand([]) if self.training and dropout_probability < self.layerdrop: layer_outputs = (None, None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions, output_router_logits=output_router_logits) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_router_logits: all_router_probs += (layer_outputs[-1],) last_hidden_state = self.layer_norm(hidden_states) if output_hidden_states: encoder_states += (last_hidden_state,) if not return_dict: return tuple((v for v in [last_hidden_state, encoder_states, all_attentions, all_router_probs] if v is not None)) return MoEModelOutput(last_hidden_state=last_hidden_state, hidden_states=encoder_states, attentions=all_attentions, router_probs=all_router_probs) def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): 
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask
class NllbMoeEncoder(NllbMoePreTrainedModel): ''' Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`NllbMoeEncoderLayer`]. Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass
4
2
89
14
55
20
15
0.44
1
12
5
0
2
9
2
3
190
31
111
37
98
49
66
27
63
24
2
3
29
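The encoder above interleaves sparse (MoE) layers among dense ones via `encoder_sparse_step`; a minimal sketch of that placement rule follows, with the function name being ours.

```python
# With sparse_step = 4, every 4th layer is sparse; sparse_step <= 0 disables MoE layers.
def sparse_layer_flags(num_layers: int, sparse_step: int) -> list:
    return [(i + 1) % sparse_step == 0 if sparse_step > 0 else False for i in range(num_layers)]

print(sparse_layer_flags(8, 4))  # [False, False, False, True, False, False, False, True]
```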
4,164
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeEncoderLayer
from ...modeling_layers import GradientCheckpointingLayer from .configuration_nllb_moe import NllbMoeConfig import torch import torch.nn as nn class NllbMoeEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: NllbMoeConfig, is_sparse: bool=False): super().__init__() self.embed_dim = config.d_model self.is_sparse = is_sparse self.self_attn = NllbMoeAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config) self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if not self.is_sparse: self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.encoder_ffn_dim) else: self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.encoder_ffn_dim) self.ff_layer_norm = nn.LayerNorm(config.d_model) self.ff_dropout = nn.Dropout(config.activation_dropout) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False, output_router_logits: bool=False) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ff_layer_norm(hidden_states) if self.is_sparse: hidden_states, router_states = self.ffn(hidden_states, attention_mask) else: hidden_states, router_states = (self.ffn(hidden_states), None) hidden_states = self.ff_dropout(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) if output_router_logits: outputs += (router_states,) return outputs
class NllbMoeEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: NllbMoeConfig, is_sparse: bool=False): pass def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False, output_router_logits: bool=False) -> torch.Tensor: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
3
1
39
5
27
7
4
0.25
1
7
4
0
2
8
2
12
79
10
55
23
45
14
35
16
32
5
1
1
7
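A minimal sketch of the pre-LayerNorm residual pattern the encoder layer above applies around both its self-attention and its (MoE) FFN blocks; the single Linear stands in for either sub-block and the sizes are toy values.

```python
import torch
import torch.nn as nn

d_model = 8
norm = nn.LayerNorm(d_model)
sub_block = nn.Linear(d_model, d_model)  # stands in for attention or the FFN
drop = nn.Dropout(0.1)

x = torch.randn(2, 5, d_model)
residual = x
x = residual + drop(sub_block(norm(x)))  # normalize, transform, dropout, add residual
print(x.shape)                           # torch.Size([2, 5, 8])
```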
4,165
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeForConditionalGeneration
from ...generation import GenerationMixin from .configuration_nllb_moe import NllbMoeConfig import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput from ...utils import auto_docstring, is_torch_flex_attn_available, logging from torch.nn import CrossEntropyLoss import torch.nn as nn @auto_docstring(custom_intro='\n The NllbMoe Model with a language modeling head. Can be used for summarization.\n ') class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel, GenerationMixin): base_model_prefix = 'model' _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: NllbMoeConfig): super().__init__(config) self.model = NllbMoeModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.router_z_loss_coef = config.router_z_loss_coef self.router_aux_loss_coef = config.router_aux_loss_coef self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqMoEOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example Translation: ```python >>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration >>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b") >>> text_to_translate = "Life is like a box of chocolates" >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt") >>> # translate to French >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("eng_Latn")) >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)) ``` """ return_dict = return_dict if return_dict is not None else self.config.return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position) lm_logits = self.lm_head(outputs[0]) loss = None encoder_aux_loss = None decoder_aux_loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) if output_router_logits: encoder_router_logits = outputs[-1] decoder_router_logits = outputs[3 if output_attentions else 4] encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_router_logits) encoder_aux_loss = load_balancing_loss_func(encoder_router_logits, encoder_expert_indexes) decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_router_logits) decoder_aux_loss = load_balancing_loss_func(decoder_router_logits, decoder_expert_indexes) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if output_router_logits and labels is not None: aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss) loss = loss + aux_loss output = (loss,) if loss is not None else () if not return_dict: output += (lm_logits,) if output_router_logits: output += (encoder_aux_loss, decoder_aux_loss, *outputs[1:]) else: output += outputs[1:] return output return Seq2SeqMoEOutput(loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, cross_attentions=outputs.cross_attentions, encoder_aux_loss=encoder_aux_loss, decoder_aux_loss=decoder_aux_loss, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, decoder_hidden_states=outputs.decoder_hidden_states, encoder_attentions=outputs.encoder_attentions, decoder_attentions=outputs.decoder_attentions, encoder_router_logits=outputs.encoder_router_logits, decoder_router_logits=outputs.decoder_router_logits) def _unpack_router_logits(self, router_outputs): total_router_logits = [] 
total_expert_indexes = [] for router_output in router_outputs: if router_output is not None: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) total_router_logits = torch.cat(total_router_logits, dim=1) if len(total_router_logits) > 0 else None total_expert_indexes = torch.stack(total_expert_indexes, dim=1) if len(total_expert_indexes) > 0 else None return (total_router_logits, total_expert_indexes)
@auto_docstring(custom_intro='\n The NllbMoe Model with a language modeling head. Can be used for summarization.\n ') class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel, GenerationMixin): def __init__(self, config: NllbMoeConfig): pass def get_encoder(self): pass def get_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqMoEOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example Translation: ```python >>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration >>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b") >>> text_to_translate = "Life is like a box of chocolates" >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt") >>> # translate to French >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("eng_Latn")) >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)) ``` ''' pass def _unpack_router_logits(self, router_outputs): pass
8
1
19
2
16
1
3
0.08
2
7
3
0
7
4
8
9
164
22
132
54
100
11
67
33
58
13
2
2
25
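A scalar sketch of how the conditional-generation head above folds the router load-balancing losses into the language-modeling loss; the numbers are made up, only the combination formula follows the record.

```python
import torch

lm_loss = torch.tensor(2.31)
encoder_aux_loss = torch.tensor(0.12)
decoder_aux_loss = torch.tensor(0.09)
router_aux_loss_coef = 0.001  # stands in for config.router_aux_loss_coef

total_loss = lm_loss + router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
print(float(total_loss))  # ~2.31021
```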
4,166
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeModel
import math import torch.nn as nn from .configuration_nllb_moe import NllbMoeConfig import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput from ...utils import auto_docstring, is_torch_flex_attn_available, logging @auto_docstring class NllbMoeModel(NllbMoePreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: NllbMoeConfig): super().__init__(config) padding_idx, vocab_size = (config.pad_token_id, config.vocab_size) embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = NllbMoeScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = NllbMoeEncoder(config, self.shared) self.decoder = NllbMoeDecoder(config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=True) -> Union[tuple[torch.Tensor], Seq2SeqMoEModelOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
Example: ```python >>> from transformers import AutoTokenizer, NllbMoeModel >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts") >>> model = SwitchTransformersModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for NllbMoeModel >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, MoEModelOutput)): encoder_outputs = MoEModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqMoEModelOutput(past_key_values=decoder_outputs.past_key_values, cross_attentions=decoder_outputs.cross_attentions, last_hidden_state=decoder_outputs.last_hidden_state, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, decoder_hidden_states=decoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, decoder_attentions=decoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, decoder_router_logits=decoder_outputs.router_probs)
@auto_docstring class NllbMoeModel(NllbMoePreTrainedModel): def __init__(self, config: NllbMoeConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _tie_weights(self): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=True) -> Union[tuple[torch.Tensor], Seq2SeqMoEModelOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, NllbMoeModel >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts") >>> model = SwitchTransformersModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for NllbMoeModel >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
9
1
17
2
13
3
2
0.21
1
9
6
0
7
3
7
8
133
19
94
34
65
20
33
15
25
8
2
1
16
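The `NllbMoeModel` record above shares one `self.shared` embedding between encoder and decoder; a tiny sketch of that parameter sharing with toy sizes follows.

```python
import torch.nn as nn

vocab_size, d_model = 100, 8
shared = nn.Embedding(vocab_size, d_model)
encoder_embed_tokens = shared  # what set_input_embeddings assigns to the encoder
decoder_embed_tokens = shared  # and to the decoder
print(encoder_embed_tokens.weight is decoder_embed_tokens.weight)  # True: one parameter matrix
```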
4,167
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoePreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, logging import torch.nn as nn from .configuration_nllb_moe import NllbMoeConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel @auto_docstring class NllbMoePreTrainedModel(PreTrainedModel): config: NllbMoeConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['NllbMoeEncoderLayer', 'NllbMoeDecoderLayer'] _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False def _init_weights(self, module: nn.Module): """Initialize the weights""" std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_()
@auto_docstring class NllbMoePreTrainedModel(PreTrainedModel): def _init_weights(self, module: nn.Module): '''Initialize the weights''' pass
3
1
11
0
10
1
5
0.07
1
0
0
4
1
0
1
1
17
1
15
7
13
1
14
7
12
5
1
2
5
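A condensed, standalone version of the initialization rule in `_init_weights` above, applied recursively to a toy module; `init_std` stands in for `config.init_std` and only the Linear branch is shown.

```python
import torch.nn as nn

init_std = 0.02  # stands in for config.init_std

def init_weights(module: nn.Module) -> None:
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=init_std)
        if module.bias is not None:
            module.bias.data.zero_()

toy = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
toy.apply(init_weights)  # visits every sub-module
print(float(toy[0].bias.abs().max()), float(toy[2].bias.abs().max()))  # 0.0 0.0
```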
4,168
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeScaledWordEmbedding
from typing import Callable, Optional, Union import torch import torch.nn as nn class NllbMoeScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale
class NllbMoeScaledWordEmbedding(nn.Embedding): ''' This module overrides nn.Embeddings' forward by multiplying with embeddings scale. ''' def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): pass def forward(self, input_ids: torch.Tensor): pass
3
1
3
0
3
0
1
0.5
1
4
0
0
2
1
2
2
11
2
6
4
3
3
6
4
3
1
1
0
2
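A toy illustration of the scaled word embedding above: a plain lookup multiplied by `embed_scale`, which is `sqrt(d_model)` when `config.scale_embedding` is set. Sizes here are arbitrary.

```python
import math
import torch
import torch.nn as nn

d_model, vocab_size, padding_idx = 16, 100, 1
embed_scale = math.sqrt(d_model)
embed = nn.Embedding(vocab_size, d_model, padding_idx)

input_ids = torch.tensor([[5, 7, 1]])
scaled = embed(input_ids) * embed_scale
print(torch.allclose(scaled, embed(input_ids) * 4.0))  # True, since sqrt(16) == 4
```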
4,169
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeSinusoidalPositionalEmbedding
from typing import Callable, Optional, Union import torch import torch.nn as nn import math class NllbMoeSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, 'weights'): emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer('weights', emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0): if input_ids is not None: bsz, seq_len = input_ids.size() position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device) else: bsz, seq_len = inputs_embeds.size()[:-1] position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, self.padding_idx) max_pos = self.padding_idx + 1 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() @staticmethod def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length @staticmethod def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx
class NllbMoeSinusoidalPositionalEmbedding(nn.Module): '''This module produces sinusoidal positional embeddings of any length.''' def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None): pass def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): pass @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): ''' Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". ''' pass @torch.no_grad() def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0): pass @staticmethod def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx): ''' We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor ''' pass @staticmethod def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): ''' Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor ''' pass
11
4
13
2
9
3
2
0.34
1
3
0
0
4
3
5
15
76
13
47
22
37
16
38
18
32
3
1
1
10
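A minimal sketch of `create_position_ids_from_input_ids` above (with `past_key_values_length` fixed at 0): padding tokens keep the `padding_idx` position while real tokens get incremental positions starting at `padding_idx + 1`.

```python
import torch

def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental.long() + padding_idx

ids = torch.tensor([[5, 8, 1, 1]])           # 1 is the padding index
print(position_ids_from_input_ids(ids, 1))   # tensor([[2, 3, 1, 1]])
```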
4,170
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeSparseMLP
import torch.nn as nn from typing import Callable, Optional, Union from .configuration_nllb_moe import NllbMoeConfig import torch class NllbMoeSparseMLP(nn.Module): """ Implementation of the NLLB-MoE sparse MLP module. """ def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module=NllbMoeDenseActDense): super().__init__() self.router = NllbMoeTop2Router(config) self.moe_token_dropout = config.moe_token_dropout self.token_dropout = nn.Dropout(self.moe_token_dropout) self.num_experts = config.num_experts self.experts = nn.ModuleDict() for idx in range(self.num_experts): self.experts[f'expert_{idx}'] = expert_class(config, ffn_dim) def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor]=False): """ The goal of this forward pass is to have the same number of operation as the equivalent `NllbMoeDenseActDense` (mlp) layer. This means that all of the hidden states should be processed at most twice ( since we are using a top_2 gating mechanism). This means that we keep the complexity to O(batch_size x sequence_length x hidden_dim) instead of O(num_experts x batch_size x sequence_length x hidden_dim). 1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length, num_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the `router_mask`. 2- Dispatch the hidden_states to its associated experts. The router probabilities are used to weight the contribution of each experts when updating the masked hidden states. Args: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): The hidden states padding_mask (`torch.Tensor`, *optional*, defaults to `False`): Attention mask. Can be in the causal form or not. Returns: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): Updated hidden states router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`): Needed for computing the loss """ batch_size, sequence_length, hidden_dim = hidden_states.shape top_1_mask, router_probs = self.router(hidden_states, padding_mask) router_mask = router_probs.bool() hidden_states = hidden_states.reshape(batch_size * sequence_length, hidden_dim) masked_hidden_states = torch.einsum('bm,be->ebm', hidden_states, router_mask) for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, idx] combining_weights = router_probs[token_indices, idx] expert_output = expert(masked_hidden_states[idx, token_indices]) if self.moe_token_dropout > 0: if self.training: expert_output = self.token_dropout(expert_output) else: expert_output *= 1 - self.moe_token_dropout masked_hidden_states[idx, token_indices] = torch.einsum('b,be->be', combining_weights, expert_output) hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim) top_1_expert_index = torch.argmax(top_1_mask, dim=-1) return (hidden_states, (router_probs, top_1_expert_index))
class NllbMoeSparseMLP(nn.Module): ''' Implementation of the NLLB-MoE sparse MLP module. ''' def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module=NllbMoeDenseActDense): pass def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor]=False): ''' The goal of this forward pass is to have the same number of operation as the equivalent `NllbMoeDenseActDense` (mlp) layer. This means that all of the hidden states should be processed at most twice ( since we are using a top_2 gating mechanism). This means that we keep the complexity to O(batch_size x sequence_length x hidden_dim) instead of O(num_experts x batch_size x sequence_length x hidden_dim). 1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length, num_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the `router_mask`. 2- Dispatch the hidden_states to its associated experts. The router probabilities are used to weight the contribution of each experts when updating the masked hidden states. Args: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): The hidden states padding_mask (`torch.Tensor`, *optional*, defaults to `False`): Attention mask. Can be in the causal form or not. Returns: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): Updated hidden states router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`): Needed for computing the loss ''' pass
3
2
29
4
14
11
3
0.83
1
8
3
0
2
5
2
12
63
10
29
18
26
24
28
18
25
4
1
3
6
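A toy view of the dispatch step in the sparse MLP above: the einsum copies each token's hidden state into a per-expert slot only where the router mask selects that expert. The mask here is hand-written; the real module derives it from the router probabilities.

```python
import torch

tokens, hidden_dim, num_experts = 4, 8, 2
hidden_states = torch.randn(tokens, hidden_dim)
router_mask = torch.tensor([[1., 0.], [0., 1.], [1., 1.], [0., 0.]])  # (tokens, experts)

# (tokens, hidden) x (tokens, experts) -> (experts, tokens, hidden)
masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask)
print(masked_hidden_states.shape)              # torch.Size([2, 4, 8])
print(masked_hidden_states[0, 3].abs().sum())  # 0: token 3 was not routed to expert 0
```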
4,171
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nllb_moe/modeling_nllb_moe.py
transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeTop2Router
from .configuration_nllb_moe import NllbMoeConfig import torch from typing import Callable, Optional, Union import math import torch.nn as nn class NllbMoeTop2Router(nn.Module): """ Router using tokens choose top-2 experts assignment. This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. The router combining weights are also returned to make sure that the states that are not updated will be masked. """ def __init__(self, config: NllbMoeConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.router_ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) self.second_expert_policy = config.second_expert_policy self.normalize_router_prob_before_dropping = config.normalize_router_prob_before_dropping self.batch_prioritized_routing = config.batch_prioritized_routing self.moe_eval_capacity_token_fraction = config.moe_eval_capacity_token_fraction def _cast_classifier(self): """ `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. """ if not (hasattr(self.classifier, 'SCB') or hasattr(self.classifier, 'CB')): self.classifier = self.classifier.to(self.dtype) def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask): top_1_max_probs = (router_probs * top_1_mask).sum(dim=1) top_2_max_probs = (router_probs * top_2_mask).sum(dim=1) denom_s = torch.clamp(top_1_max_probs + top_2_max_probs, min=torch.finfo(router_probs.dtype).eps) top_1_max_probs = top_1_max_probs / denom_s top_2_max_probs = top_2_max_probs / denom_s return (top_1_max_probs, top_2_max_probs) def route_tokens(self, router_logits: torch.Tensor, input_dtype: torch.dtype=torch.float32, padding_mask: Optional[torch.LongTensor]=None) -> tuple: """ Computes the `dispatch_mask` and the `dispatch_weights` for each experts. The masks are adapted to the expert capacity. 
""" nb_tokens = router_logits.shape[0] router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(input_dtype) top_1_expert_index = torch.argmax(router_probs, dim=-1) top_1_mask = torch.nn.functional.one_hot(top_1_expert_index, num_classes=self.num_experts) if self.second_expert_policy == 'sampling': gumbel = torch.distributions.gumbel.Gumbel(0, 1).rsample router_logits += gumbel(router_logits.shape).to(router_logits.device) logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float('-inf')) top_2_expert_index = torch.argmax(logits_except_top_1, dim=-1) top_2_mask = torch.nn.functional.one_hot(top_2_expert_index, num_classes=self.num_experts) if self.normalize_router_prob_before_dropping: top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(router_probs, top_1_mask, top_2_mask) if self.second_expert_policy == 'random': top_2_max_probs = (router_probs * top_2_mask).sum(dim=1) sampled = 2 * top_2_max_probs > torch.rand_like(top_2_max_probs.float()) top_2_mask = top_2_mask * sampled.repeat(self.num_experts, 1).transpose(1, 0) if padding_mask is not None and (not self.router_ignore_padding_tokens): if len(padding_mask.shape) == 4: padding_mask = padding_mask[:, :, -1, :].reshape(-1)[-nb_tokens:] non_padding = ~padding_mask.bool() top_1_mask = top_1_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype) top_2_mask = top_2_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype) if self.batch_prioritized_routing: importance_scores = -1 * router_probs.max(dim=1)[0] sorted_top_1_mask = top_1_mask[importance_scores.argsort(dim=0)] sorted_cumsum1 = (torch.cumsum(sorted_top_1_mask, dim=0) - 1) * sorted_top_1_mask locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)] sorted_top_2_mask = top_2_mask[importance_scores.argsort(dim=0)] sorted_cumsum2 = (torch.cumsum(sorted_top_2_mask, dim=0) - 1) * sorted_top_2_mask locations2 = sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)] locations2 += torch.sum(top_1_mask, dim=0, keepdim=True) else: locations1 = torch.cumsum(top_1_mask, dim=0) - 1 locations2 = torch.cumsum(top_2_mask, dim=0) - 1 locations2 += torch.sum(top_1_mask, dim=0, keepdim=True) if not self.training and self.moe_eval_capacity_token_fraction > 0: self.expert_capacity = math.ceil(self.moe_eval_capacity_token_fraction * nb_tokens) else: capacity = 2 * math.ceil(nb_tokens / self.num_experts) self.expert_capacity = capacity if self.expert_capacity is None else self.expert_capacity top_1_mask = top_1_mask * torch.lt(locations1, self.expert_capacity) top_2_mask = top_2_mask * torch.lt(locations2, self.expert_capacity) if not self.normalize_router_prob_before_dropping: top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(router_probs, top_1_mask, top_2_mask) gates1 = top_1_max_probs[:, None] * top_1_mask gates2 = top_2_max_probs[:, None] * top_2_mask router_probs = gates1 + gates2 return (top_1_mask, router_probs) def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor]=None) -> tuple: """ The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for each experts.) Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)): Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token using the top1 probabilities of the router. 
router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. Used for routing tokens to experts. router_logits (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. """ self.input_dtype = hidden_states.dtype batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.reshape(batch_size * sequence_length, hidden_dim) hidden_states = hidden_states.to(self.dtype) self._cast_classifier() router_logits = self.classifier(hidden_states) top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask) return (top_1_mask, router_probs)
class NllbMoeTop2Router(nn.Module): ''' Router using tokens choose top-2 experts assignment. This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. The router combining weights are also returned to make sure that the states that are not updated will be masked. ''' def __init__(self, config: NllbMoeConfig): pass def _cast_classifier(self): ''' `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. ''' pass def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask): pass def route_tokens(self, router_logits: torch.Tensor, input_dtype: torch.dtype=torch.float32, padding_mask: Optional[torch.LongTensor]=None) -> tuple: ''' Computes the `dispatch_mask` and the `dispatch_weights` for each experts. The masks are adapted to the expert capacity. ''' pass def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor]=None) -> tuple: ''' The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for each experts.) Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)): Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token using the top1 probabilities of the router. router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, nump_experts)): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. Used for routing tokens to experts. router_logits (`torch.Tensor` of shape (batch_size, sequence_length))): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. ''' pass
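Since the row above only shows the class source and its skeleton, a short self-contained sketch may help make the dispatch logic in `route_tokens` concrete. This is a minimal illustration under made-up sizes and random logits, not the library implementation: it reproduces only the top-1/top-2 selection, the `2 * ceil(tokens / experts)` capacity cap, and the combining-weight normalisation described in the docstring.

```python
# Minimal top-2 routing sketch with an expert-capacity cap (illustrative sizes).
import math
import torch

torch.manual_seed(0)
num_tokens, num_experts = 8, 4
router_logits = torch.randn(num_tokens, num_experts)
router_probs = torch.softmax(router_logits, dim=-1)

# Top-1 expert per token, then top-2 = argmax over the remaining experts.
top_1_mask = torch.nn.functional.one_hot(router_probs.argmax(dim=-1), num_classes=num_experts)
logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float("-inf"))
top_2_mask = torch.nn.functional.one_hot(logits_except_top_1.argmax(dim=-1), num_classes=num_experts)

# Position of each token in its expert's queue; assignments past the capacity are dropped.
expert_capacity = 2 * math.ceil(num_tokens / num_experts)
locations1 = torch.cumsum(top_1_mask, dim=0) - 1
locations2 = torch.cumsum(top_2_mask, dim=0) - 1 + top_1_mask.sum(dim=0, keepdim=True)
top_1_mask = top_1_mask * (locations1 < expert_capacity)
top_2_mask = top_2_mask * (locations2 < expert_capacity)

# Combining weights: renormalised top-1/top-2 probabilities for the surviving assignments.
p1 = (router_probs * top_1_mask).sum(dim=1)
p2 = (router_probs * top_2_mask).sum(dim=1)
denom = (p1 + p2).clamp(min=torch.finfo(router_probs.dtype).eps)
combine_weights = (p1 / denom)[:, None] * top_1_mask + (p2 / denom)[:, None] * top_2_mask
print(combine_weights.shape)  # (num_tokens, num_experts)
```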
6
4
27
3
18
7
3
0.46
1
4
1
0
5
10
5
15
153
23
89
48
78
41
78
43
72
10
1
2
15
4,172
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nougat/image_processing_nougat.py
transformers.models.nougat.image_processing_nougat.NougatImageProcessor
from ...utils import TensorType, filter_out_non_signature_kwargs, logging from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import get_resize_output_image_size, pad, resize, to_channel_dimension_format, to_pil_image from typing import Optional, Union import numpy as np from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments class NougatImageProcessor(BaseImageProcessor): """ Constructs a Nougat image processor. Args: do_crop_margin (`bool`, *optional*, defaults to `True`): Whether to crop the image margins. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_thumbnail (`bool`, *optional*, defaults to `True`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `False`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the images to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Image standard deviation. 
""" model_input_names = ['pixel_values'] def __init__(self, do_crop_margin: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_thumbnail: bool=True, do_align_long_axis: bool=False, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 896, 'width': 672} size = get_size_dict(size) self.do_crop_margin = do_crop_margin self.do_resize = do_resize self.size = size self.resample = resample self.do_thumbnail = do_thumbnail self.do_align_long_axis = do_align_long_axis self.do_pad = do_pad self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def python_find_non_zero(self, image: np.ndarray): """This is a reimplementation of a findNonZero function equivalent to cv2.""" non_zero_indices = np.column_stack(np.nonzero(image)) idxvec = non_zero_indices[:, [1, 0]] idxvec = idxvec.reshape(-1, 1, 2) return idxvec def python_bounding_rect(self, coordinates): """This is a reimplementation of a BoundingRect function equivalent to cv2.""" min_values = np.min(coordinates, axis=(0, 1)).astype(int) max_values = np.max(coordinates, axis=(0, 1)).astype(int) x_min, y_min = (min_values[0], min_values[1]) width = max_values[0] - x_min + 1 height = max_values[1] - y_min + 1 return (x_min, y_min, width, height) def crop_margin(self, image: np.ndarray, gray_threshold: int=200, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array: """ Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). Args: image (`np.array`): The image to be cropped. gray_threshold (`int`, *optional*, defaults to `200`) Value below which pixels are considered to be gray. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If unset, will use the inferred format from the input. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. 
""" if input_data_format is None: input_data_format = infer_channel_dimension_format(image) image = to_pil_image(image, input_data_format=input_data_format) data = np.array(image.convert('L')).astype(np.uint8) max_val = data.max() min_val = data.min() if max_val == min_val: image = np.array(image) image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image data = (data - min_val) / (max_val - min_val) * 255 gray = data < gray_threshold coords = self.python_find_non_zero(gray) x_min, y_min, width, height = self.python_bounding_rect(coords) image = image.crop((x_min, y_min, x_min + width, y_min + height)) image = np.array(image).astype(np.uint8) image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image def align_long_axis(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """ Align the long axis of the image to the longest axis of the specified size. Args: image (`np.ndarray`): The image to be aligned. size (`dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The aligned image. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = (size['height'], size['width']) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if input_data_format == ChannelDimension.LAST: rot_axes = (0, 1) elif input_data_format == ChannelDimension.FIRST: rot_axes = (1, 2) else: raise ValueError(f'Unsupported data format: {input_data_format}') if output_width < output_height and input_width > input_height or (output_width > output_height and input_width < input_height): image = np.rot90(image, 3, axes=rot_axes) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def pad_image(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """ Pad the image to the specified size at the top, bottom, left and right. Args: image (`np.ndarray`): The image to be padded. size (`dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" output_height, output_width = (size['height'], size['width']) input_height, input_width = get_image_size(image, channel_dim=input_data_format) delta_width = output_width - input_width delta_height = output_height - input_height pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = ((pad_top, pad_bottom), (pad_left, pad_right)) return pad(image, padding, data_format=data_format, input_data_format=input_data_format) def thumbnail(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`np.ndarray`): The image to be resized. size (`dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use. data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = (size['height'], size['width']) height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) return resize(image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs) def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resizes `image` to `(height, width)` specified by `size` using the PIL library. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size = get_size_dict(size) shortest_edge = min(size['height'], size['width']) output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format) resized_image = resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return resized_image @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_crop_margin: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`): Whether to crop the image margins. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to min(size["height"], size["width"]) with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the images to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. 
- `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_crop_margin = do_crop_margin if do_crop_margin is not None else self.do_crop_margin do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_crop_margin: images = [self.crop_margin(image, input_data_format=input_data_format) for image in images] if do_align_long_axis: images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images] if do_resize: images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_thumbnail: images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images] if do_pad: images = [self.pad_image(image=image, size=size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors)
class NougatImageProcessor(BaseImageProcessor): ''' Constructs a Nougat image processor. Args: do_crop_margin (`bool`, *optional*, defaults to `True`): Whether to crop the image margins. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_thumbnail (`bool`, *optional*, defaults to `True`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `False`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the images to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Image standard deviation. ''' def __init__(self, do_crop_margin: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_thumbnail: bool=True, do_align_long_axis: bool=False, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: pass def python_find_non_zero(self, image: np.ndarray): '''This is a reimplementation of a findNonZero function equivalent to cv2.''' pass def python_bounding_rect(self, coordinates): '''This is a reimplementation of a BoundingRect function equivalent to cv2.''' pass def crop_margin(self, image: np.ndarray, gray_threshold: int=200, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array: ''' Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). Args: image (`np.array`): The image to be cropped. gray_threshold (`int`, *optional*, defaults to `200`) Value below which pixels are considered to be gray. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If unset, will use the inferred format from the input. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. 
If unset, will use the inferred format from the input. ''' pass def align_long_axis(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: ''' Align the long axis of the image to the longest axis of the specified size. Args: image (`np.ndarray`): The image to be aligned. size (`dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The aligned image. ''' pass def pad_image(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: ''' Pad the image to the specified size at the top, bottom, left and right. Args: image (`np.ndarray`): The image to be padded. size (`dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def thumbnail(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`np.ndarray`): The image to be resized. size (`dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use. data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resizes `image` to `(height, width)` specified by `size` using the PIL library. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
''' pass @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_crop_margin: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`): Whether to crop the image margins. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to min(size["height"], size["width"]) with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the images to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
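A hedged usage sketch for the preprocessing pipeline above. It assumes `transformers`, `torch`, and Pillow are installed and that `page.png` is a hypothetical local document scan; the default size `{"height": 896, "width": 672}` comes from the `__init__` shown above.

```python
# Sketch: run a document image through the Nougat image-processing pipeline.
from PIL import Image
from transformers import NougatImageProcessor

image_processor = NougatImageProcessor()  # defaults: crop margin, resize, thumbnail, pad, rescale, normalize
image = Image.open("page.png").convert("RGB")  # hypothetical local file

# __call__ forwards to preprocess(): crop_margin -> resize -> thumbnail -> pad -> rescale -> normalize.
batch = image_processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected (1, 3, 896, 672) with the default size
```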
11
9
45
4
27
14
5
0.65
1
8
2
0
9
12
9
29
454
48
247
123
169
160
125
55
115
23
3
1
43
4,173
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nougat/processing_nougat.py
transformers.models.nougat.processing_nougat.NougatProcessor
from typing import Optional, Union from transformers.tokenization_utils_base import PreTokenizedInput, TextInput, TruncationStrategy from ...utils import PaddingStrategy, TensorType from ...processing_utils import ProcessorMixin class NougatProcessor(ProcessorMixin): """ Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor. [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information. Args: image_processor ([`NougatImageProcessor`]): An instance of [`NougatImageProcessor`]. The image processor is a required input. tokenizer ([`NougatTokenizerFast`]): An instance of [`NougatTokenizerFast`]. The tokenizer is a required input. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = 'AutoImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, images=None, text=None, do_crop_margin: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: 'PILImageResampling'=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional['ChannelDimension']='channels_first', input_data_format: Optional[Union[str, 'ChannelDimension']]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True): if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.') if images is not None: inputs = self.image_processor(images, do_crop_margin=do_crop_margin, do_resize=do_resize, size=size, resample=resample, do_thumbnail=do_thumbnail, do_align_long_axis=do_align_long_axis, do_pad=do_pad, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, return_tensors=return_tensors, data_format=data_format, input_data_format=input_data_format) if text is not None: encodings = self.tokenizer(text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose) if text is None: return inputs elif images is None: return encodings else: inputs['labels'] = encodings['input_ids'] return inputs def post_process_generation(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.post_process_generation(*args, **kwargs)
class NougatProcessor(ProcessorMixin): ''' Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor. [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information. Args: image_processor ([`NougatImageProcessor`]): An instance of [`NougatImageProcessor`]. The image processor is a required input. tokenizer ([`NougatTokenizerFast`]): An instance of [`NougatTokenizerFast`]. The tokenizer is a required input. ''' def __init__(self, image_processor, tokenizer): pass def __call__(self, images=None, text=None, do_crop_margin: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: 'PILImageResampling'=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional['ChannelDimension']='channels_first', input_data_format: Optional[Union[str, 'ChannelDimension']]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True): pass def post_process_generation(self, *args, **kwargs): ''' This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`]. Please refer to the docstring of this method for more information. ''' pass
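A minimal sketch of how the processor above combines its two components. The `facebook/nougat-base` checkpoint and the local file `page.png` are illustrative assumptions; per `__call__` above, passing both `images` and `text` yields the image features plus the tokenized `input_ids` attached as `labels`.

```python
# Sketch: combined image + text processing with NougatProcessor.
from PIL import Image
from transformers import NougatProcessor

processor = NougatProcessor.from_pretrained("facebook/nougat-base")  # illustrative checkpoint
image = Image.open("page.png").convert("RGB")  # hypothetical local file

# images only -> pixel_values; images + text -> pixel_values plus labels.
inputs = processor(images=image, text="# A Section Title", return_tensors="pt")
print(list(inputs.keys()))  # expected: ['pixel_values', 'labels']
```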
4
2
22
0
20
3
2
0.25
1
7
0
0
5
1
5
22
134
10
102
50
58
25
25
12
19
6
2
1
10
4,174
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nougat/tokenization_nougat_fast.py
transformers.models.nougat.tokenization_nougat_fast.NougatTokenizerFast
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast from functools import partial from ...utils import is_levenshtein_available, is_nltk_available, logging, requires_backends from typing import Optional, Union from transformers.utils import add_end_docstrings from transformers.tokenization_utils_base import INIT_TOKENIZER_DOCSTRING from multiprocessing import Pool import re @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class NougatTokenizerFast(PreTrainedTokenizerFast): """ Fast tokenizer for Nougat (backed by HuggingFace tokenizers library). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific methods for postprocessing the generated text. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`): Whether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = None def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', **kwargs): super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) self.vocab_file = vocab_file def remove_hallucinated_references(self, text: str) -> str: """ Remove hallucinated or missing references from the text. This function identifies and removes references that are marked as missing or hallucinated from the input text. Args: text (`str`): The input text containing references. Returns: `str`: The text with hallucinated references removed. """ lines = text.split('\n') if len(lines) == 0: return '' clean_lines = remove_numbers(lines) slices = get_slices(lines, clean_lines) to_delete = [] for slice in slices: to_delete.append(remove_slice_from_lines(lines, clean_lines, slice)) for to_delete in reversed(to_delete): text = text.replace(to_delete, '\n\n[MISSING_PAGE_POST]\n\n') text = re.sub('## References\\n+\\[MISSING_PAGE_POST(:\\d+)?\\]', '\n\n[MISSING_PAGE_POST\\1]', text) return text def correct_tables(self, generation: str) -> str: """ Takes a generated string and fixes tables/tabulars to make them match the markdown format needed. Args: generation (str): The generated text to be postprocessed. 
Returns: str: The postprocessed text. Example: ```python correct_tables("\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}") "\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}" ``` """ for l in generation.split('\n'): if l.count('\\begin{tabular}') > 15 or l.count('\\multicolumn') > 60 or l.count('&') > 400: generation = generation.replace(l, '') generation = generation.replace('\\begin{table} \\begin{tabular}', '\\begin{table}\n\\begin{tabular}') generation = generation.replace('\\end{tabular} \\end{table}', '\\end{tabular}\n\\end{table}') generation = generation.replace('\\end{table} Tab', '\\end{table}\nTab') generation = re.sub('(^.+)\\\\begin{tab', '\\1\\n\\\\begin{tab', generation, flags=re.M) generation = generation.replace('\\begin{tabular}{l l} & \\\\ \\end{tabular}', '') generation = generation.replace('\\begin{tabular}{}\n\n\\end{tabular}', '') return generation def post_process_single(self, generation: str, fix_markdown: bool=True) -> str: """ Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article authors. These expressions are commented for clarity and tested end-to-end in most cases. Args: generation (str): The generated text to be postprocessed. fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True. Returns: str: The postprocessed text. """ generation = re.sub('(?:\\n|^)#+ \\d*\\W? ?(.{100,})', '\\n\\1', generation) generation = generation.strip() generation = generation.replace('\n* [leftmargin=*]\n', '\n') generation = re.sub('^#+ (?:[\\d+\\.]+|[ixv\\.]+)?\\s*(?:$|\\n\\s*)', '', generation, flags=re.M) lines = generation.split('\n') if lines[-1].startswith('#') and lines[-1].lstrip('#').startswith(' ') and (len(lines) > 1): logger.info('Likely hallucinated title at the end of the page: ' + lines[-1]) generation = '\n'.join(lines[:-1]) generation = truncate_repetitions(generation) generation = self.remove_hallucinated_references(generation) generation = re.sub('^\\* \\[\\d+\\](\\s?[A-W]\\.+\\s?){10,}.*$', '', generation, flags=re.M) generation = re.sub('^(\\* \\[\\d+\\])\\[\\](.*)$', '\\1\\2', generation, flags=re.M) generation = re.sub('(^\\w\\n\\n|\\n\\n\\w$)', '', generation) generation = re.sub('([\\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\\s.,:()])', '\\1\\(\\2_{\\3}\\)\\4', generation) generation = re.sub('([\\s.,\\d])_([a-zA-Z0-9])_([\\s.,\\d;])', '\\1\\(\\2\\)\\3', generation) generation = re.sub('(\\nFootnote .*?:) (?:footnotetext|thanks):\\W*(.*(?:\\n\\n|$))', '\\1 \\2', generation) generation = re.sub('\\[FOOTNOTE:.+?\\](.*?)\\[ENDFOOTNOTE\\]', '', generation) generation = normalize_list_like_lines(generation) if generation.endswith(('.', '}')): generation += '\n\n' if re.match('[A-Z0-9,;:]$', generation): generation += ' ' elif generation.startswith(('#', '**', '\\begin')): generation = '\n\n' + generation elif generation.split('\n')[-1].startswith(('#', 'Figure', 'Table')): generation = generation + '\n\n' else: try: last_word = generation.split(' ')[-1] if last_word in nltk.corpus.words.words(): generation += ' ' except LookupError: generation += ' ' generation = self.correct_tables(generation) generation = generation.replace('\\begin{array}[]{', '\\begin{array}{') generation = re.sub('\\\\begin{tabular}{([clr ]){2,}}\\s*[& ]*\\s*(\\\\\\\\)? \\\\end{tabular}', '', generation) generation = re.sub('(\\*\\*S\\. A\\. 
B\\.\\*\\*\\n+){2,}', '', generation) generation = re.sub('^#+( [\\[\\d\\w])?$', '', generation, flags=re.M) generation = re.sub('^\\.\\s*$', '', generation, flags=re.M) generation = re.sub('\\n{3,}', '\n\n', generation) if fix_markdown: return markdown_compatible(generation) else: return generation def post_process_generation(self, generation: Union[str, list[str]], fix_markdown: bool=True, num_workers: Optional[int]=None) -> Union[str, list[str]]: """ Postprocess a generated text or a list of generated texts. This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting. Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process. Args: generation (Union[str, list[str]]): The generated text or a list of generated texts. fix_markdown (`bool`, *optional*, defaults to `True`): Whether to perform Markdown formatting fixes. num_workers (`int`, *optional*): Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in parallel). Returns: Union[str, list[str]]: The postprocessed text or list of postprocessed texts. """ requires_backends(self, ['nltk', 'levenshtein']) if isinstance(generation, list): if num_workers is not None and isinstance(num_workers, int): with Pool(num_workers) as p: return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation) else: return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation] else: return self.post_process_single(generation, fix_markdown=fix_markdown)
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class NougatTokenizerFast(PreTrainedTokenizerFast): ''' Fast tokenizer for Nougat (backed by HuggingFace tokenizers library). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific methods for postprocessing the generated text. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`): Whether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. ''' def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', **kwargs): pass def remove_hallucinated_references(self, text: str) -> str: ''' Remove hallucinated or missing references from the text. This function identifies and removes references that are marked as missing or hallucinated from the input text. Args: text (`str`): The input text containing references. Returns: `str`: The text with hallucinated references removed. ''' pass def correct_tables(self, generation: str) -> str: ''' Takes a generated string and fixes tables/tabulars to make them match the markdown format needed. Args: generation (str): The generated text to be postprocessed. Returns: str: The postprocessed text. Example: ```python correct_tables("\begin{table} \begin{tabular}{l l} & \ \end{tabular} \end{table}") "\begin{table} \begin{tabular}{l l} & \ \end{tabular} \end{table}" ``` ''' pass def post_process_single(self, generation: str, fix_markdown: bool=True) -> str: ''' Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article authors. These expressions are commented for clarity and tested end-to-end in most cases. Args: generation (str): The generated text to be postprocessed. fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True. Returns: str: The postprocessed text. ''' pass def post_process_generation(self, generation: Union[str, list[str]], fix_markdown: bool=True, num_workers: Optional[int]=None) -> Union[str, list[str]]: ''' Postprocess a generated text or a list of generated texts. This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting. Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process. Args: generation (Union[str, list[str]]): The generated text or a list of generated texts. 
fix_markdown (`bool`, *optional*, defaults to `True`): Whether to perform Markdown formatting fixes. num_workers (`int`, *optional*): Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in parallel). Returns: Union[str, list[str]]: The postprocessed text or list of postprocessed texts. ''' pass
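A hedged sketch of the post-processing entry point described above. It assumes the `nltk` (with its `words` corpus) and `python-Levenshtein` backends required by `post_process_generation` are installed, and uses the `facebook/nougat-base` checkpoint purely for illustration.

```python
# Sketch: clean up raw Nougat generations with the fast tokenizer's post-processing.
from transformers import NougatTokenizerFast

tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")  # illustrative checkpoint

raw_generations = [
    "## 1 Introduction\n\nNougat parses academic PDFs into markup",
    "\\begin{table} \\begin{tabular}{l l} & \\\\ \\end{tabular} \\end{table}",
]

# fix_markdown=True applies the Markdown fixes; passing num_workers would fan the
# list out over a multiprocessing Pool (see post_process_generation above).
cleaned = tokenizer.post_process_generation(raw_generations, fix_markdown=True)
for text in cleaned:
    print(repr(text))
```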
7
5
43
4
25
16
4
0.81
1
8
0
0
5
1
5
93
256
32
128
34
107
104
80
18
74
9
3
3
20
4,175
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/configuration_nystromformer.py
transformers.models.nystromformer.configuration_nystromformer.NystromformerConfig
from ...configuration_utils import PretrainedConfig class NystromformerConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate an Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nystromformer [uw-madison/nystromformer-512](https://huggingface.co/uw-madison/nystromformer-512) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30000): Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NystromformerModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`NystromformerModel`]. segment_means_seq_len (`int`, *optional*, defaults to 64): Sequence length used in segment-means. num_landmarks (`int`, *optional*, defaults to 64): The number of landmark (or Nystrom) points to use in Nystrom approximation of the softmax self-attention matrix. conv_kernel_size (`int`, *optional*, defaults to 65): The kernel size of depthwise convolution used in Nystrom approximation. inv_coeff_init_option (`bool`, *optional*, defaults to `False`): Whether or not to use exact coefficient computation for the initial values for the iterative method of calculating the Moore-Penrose inverse of a matrix. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import NystromformerModel, NystromformerConfig >>> # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration >>> configuration = NystromformerConfig() >>> # Initializing a model from the uw-madison/nystromformer-512 style configuration >>> model = NystromformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'nystromformer' def __init__(self, vocab_size=30000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=510, type_vocab_size=2, segment_means_seq_len=64, num_landmarks=64, conv_kernel_size=65, inv_coeff_init_option=False, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.segment_means_seq_len = segment_means_seq_len self.num_landmarks = num_landmarks self.conv_kernel_size = conv_kernel_size self.inv_coeff_init_option = inv_coeff_init_option self.layer_norm_eps = layer_norm_eps super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class NystromformerConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate a Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nystromformer [uw-madison/nystromformer-512](https://huggingface.co/uw-madison/nystromformer-512) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30000): Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NystromformerModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 510): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`NystromformerModel`]. segment_means_seq_len (`int`, *optional*, defaults to 64): Sequence length used in segment-means. num_landmarks (`int`, *optional*, defaults to 64): The number of landmark (or Nystrom) points to use in Nystrom approximation of the softmax self-attention matrix. conv_kernel_size (`int`, *optional*, defaults to 65): The kernel size of depthwise convolution used in Nystrom approximation. inv_coeff_init_option (`bool`, *optional*, defaults to `False`): Whether or not to use exact coefficient computation for the initial values for the iterative method of calculating the Moore-Penrose inverse of a matrix. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import NystromformerModel, NystromformerConfig >>> # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration >>> configuration = NystromformerConfig() >>> # Initializing a model from the uw-madison/nystromformer-512 style configuration >>> model = NystromformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=510, type_vocab_size=2, segment_means_seq_len=64, num_landmarks=64, conv_kernel_size=65, inv_coeff_init_option=False, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): pass
2
1
40
0
40
0
1
1.31
1
1
0
0
1
16
1
1
106
9
42
41
18
55
20
19
18
1
1
0
1
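A minimal configuration sketch for the `NystromformerConfig` record above, hedged as an illustration: it assumes `transformers` and `torch` are installed and uses randomly initialized weights. The Nyström-specific knobs interact, since self-attention pools the sequence into `num_landmarks` segment means of length `segment_means_seq_len // num_landmarks`, so `segment_means_seq_len` should be a multiple of `num_landmarks`.

```python
from transformers import NystromformerConfig, NystromformerModel

# Library defaults written out explicitly; segment_means_seq_len is a multiple of
# num_landmarks, which is what the segment-means pooling in self-attention assumes.
config = NystromformerConfig(
    num_landmarks=64,
    segment_means_seq_len=64,
    conv_kernel_size=65,            # depthwise conv applied on the value path
    max_position_embeddings=510,
)
model = NystromformerModel(config)  # randomly initialized, no checkpoint download
print(config.num_landmarks, config.segment_means_seq_len, config.max_position_embeddings)
```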
4,176
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerAttention
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from torch import nn class NystromformerAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = NystromformerSelfAttention(config, position_embedding_type=position_embedding_type) self.output = NystromformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class NystromformerAttention(nn.Module): def __init__(self, config, position_embedding_type=None): pass def prune_heads(self, heads): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False): pass
4
0
9
1
8
1
1
0.13
1
4
2
0
3
3
3
13
30
4
24
11
20
3
22
11
18
2
1
1
4
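A hedged usage sketch for `NystromformerAttention.prune_heads` above: the usual entry point is the model-level `prune_heads`, which passes a `{layer_index: [head_indices]}` mapping down to each attention block. `conv_kernel_size=None` is chosen here because the grouped value convolution is sized by the head count and is not shrunk by pruning.

```python
from transformers import NystromformerConfig, NystromformerModel

# Small randomly initialized model; conv_kernel_size=None avoids the per-head grouped
# convolution, which prune_heads does not resize.
config = NystromformerConfig(num_hidden_layers=2, num_attention_heads=12, conv_kernel_size=None)
model = NystromformerModel(config)

model.prune_heads({0: [0, 1], 1: [5]})  # drop heads 0 and 1 in layer 0, head 5 in layer 1

self_attn = model.encoder.layer[0].attention.self
print(self_attn.num_attention_heads)    # 10
print(self_attn.query.weight.shape)     # torch.Size([640, 768]) = 10 heads * 64 dims
```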
4,177
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerClassificationHead
from ...activations import ACT2FN from torch import nn class NystromformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x
class NystromformerClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, config): pass def forward(self, features, **kwargs): pass
3
1
8
1
7
1
1
0.13
1
1
0
0
2
4
2
12
19
3
15
8
12
2
15
8
12
1
1
0
2
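A standalone sketch of the pooling convention in `NystromformerClassificationHead` above: the first token's hidden state is used as the sequence summary before the dropout / dense / activation / projection stack (plain `gelu` stands in here for `ACT2FN[config.hidden_act]`).

```python
import torch

batch_size, seq_len, hidden_size, num_labels = 2, 16, 8, 3
features = torch.randn(batch_size, seq_len, hidden_size)  # stand-in encoder output

x = features[:, 0, :]                                      # first-token ("[CLS]"-style) pooling
dense = torch.nn.Linear(hidden_size, hidden_size)
out_proj = torch.nn.Linear(hidden_size, num_labels)
logits = out_proj(torch.nn.functional.gelu(dense(x)))      # gelu stands in for ACT2FN[hidden_act]
print(logits.shape)                                        # torch.Size([2, 3])
```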
4,178
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings
from torch import nn import torch class NystromformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class NystromformerEmbeddings(nn.Module): '''Construct the embeddings from word, position and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): pass
3
1
27
4
21
3
4
0.17
1
1
0
0
2
6
2
12
58
9
42
16
39
7
34
16
31
7
1
2
8
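A minimal standalone sketch of the position-id convention in `NystromformerEmbeddings` above: positions are offset by 2, which is why the position embedding table is sized `max_position_embeddings + 2`.

```python
import torch
from torch import nn

max_position_embeddings = 510
position_embeddings = nn.Embedding(max_position_embeddings + 2, 768)
position_ids = torch.arange(max_position_embeddings).expand((1, -1)) + 2  # positions start at 2

seq_length = 16
out = position_embeddings(position_ids[:, :seq_length])
print(position_ids[0, :4].tolist())  # [2, 3, 4, 5]
print(out.shape)                     # torch.Size([1, 16, 768])
```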
4,179
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerEncoder
from typing import Optional, Union from torch import nn from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch class NystromformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([NystromformerLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class NystromformerEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): pass
3
0
23
3
20
0
5
0
1
8
2
0
2
3
2
12
47
6
41
18
30
0
23
10
20
9
1
2
10
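A hedged shape check for `NystromformerEncoder` above (random weights, no checkpoint): with `output_hidden_states=True` the returned tuple contains the embedding output plus one entry per layer.

```python
import torch
from transformers import NystromformerConfig, NystromformerModel

config = NystromformerConfig(num_hidden_layers=4)
model = NystromformerModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 64))  # 64 matches segment_means_seq_len
with torch.no_grad():
    outputs = model(input_ids, output_hidden_states=True, output_attentions=True)

print(len(outputs.hidden_states))       # 5 = embedding output + 4 layers
print(outputs.hidden_states[-1].shape)  # torch.Size([1, 64, 768])
print(len(outputs.attentions))          # 4
```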
4,180
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerForMaskedLM
from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...utils import auto_docstring, logging import torch @auto_docstring class NystromformerForMaskedLM(NystromformerPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) self.nystromformer = NystromformerModel(config) self.cls = NystromformerOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class NystromformerForMaskedLM(NystromformerPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. ''' pass
7
1
16
2
13
2
2
0.14
1
6
3
0
4
2
4
5
76
11
58
27
35
8
25
14
20
5
2
1
8
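A small sketch of the labeling convention documented in `NystromformerForMaskedLM` above: positions set to -100 are ignored by `CrossEntropyLoss`, so only masked positions contribute to the loss. Weights are randomly initialized, so the loss value itself is meaningless here.

```python
import torch
from transformers import NystromformerConfig, NystromformerForMaskedLM

config = NystromformerConfig(num_hidden_layers=2)
model = NystromformerForMaskedLM(config)

input_ids = torch.randint(0, config.vocab_size, (1, 64))
labels = torch.full_like(input_ids, -100)  # ignore every position ...
labels[0, 10] = input_ids[0, 10]           # ... except one "masked" token

outputs = model(input_ids, labels=labels)
print(outputs.loss)                        # scalar; only position 10 contributes
```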
4,181
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerForMultipleChoice
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union from torch import nn import torch from ...utils import auto_docstring, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring class NystromformerForMultipleChoice(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.nystromformer = NystromformerModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.nystromformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_state = outputs[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = nn.ReLU()(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class NystromformerForMultipleChoice(NystromformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
5
1
38
5
30
6
6
0.16
1
5
2
0
2
3
2
3
86
10
69
30
46
11
30
15
27
11
2
1
12
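A standalone sketch of the choice-flattening used in `NystromformerForMultipleChoice` above: `(batch, num_choices, seq_len)` inputs are folded to `(batch * num_choices, seq_len)` for the encoder, and the per-choice logits are unfolded back to `(batch, num_choices)` before a cross-entropy over choices.

```python
import torch

batch_size, num_choices, seq_len = 2, 4, 8
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (8, 8): one row per choice
logits = torch.randn(batch_size * num_choices, 1)        # stand-in for the classifier output
reshaped_logits = logits.view(-1, num_choices)           # (2, 4): one score per choice

labels = torch.tensor([1, 3])                            # index of the correct choice
loss = torch.nn.CrossEntropyLoss()(reshaped_logits, labels)
print(flat_input_ids.shape, reshaped_logits.shape, loss.item())
```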
4,182
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerForQuestionAnswering
import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn from typing import Optional, Union from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput @auto_docstring class NystromformerForQuestionAnswering(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class NystromformerForQuestionAnswering(NystromformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: pass
5
0
42
5
31
7
4
0.19
1
5
2
0
2
3
2
3
92
11
68
30
46
13
33
16
30
7
2
2
8
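A minimal sketch of the span loss in `NystromformerForQuestionAnswering` above: start/end logits are split from a two-unit head, out-of-range target positions are clamped to `ignored_index`, and the final loss is the mean of the start and end cross-entropies.

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 16
logits = torch.randn(batch_size, seq_len, 2)               # stand-in for qa_outputs(...)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)

start_positions = torch.tensor([3, 40])                    # 40 is out of range on purpose
end_positions = torch.tensor([5, 41])
ignored_index = start_logits.size(1)                       # = seq_len
start_positions = start_positions.clamp(0, ignored_index)  # out-of-range targets -> ignored
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```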
4,183
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerForSequenceClassification
import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from typing import Optional, Union from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput @auto_docstring(custom_intro='\n Nyströmformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class NystromformerForSequenceClassification(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.classifier = NystromformerClassificationHead(config) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n Nyströmformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class NystromformerForSequenceClassification(NystromformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
38
3
32
4
7
0.1
1
7
3
0
2
3
2
3
84
7
70
25
49
7
32
12
29
12
2
3
13
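A compact, model-free sketch of the `problem_type` dispatch in `NystromformerForSequenceClassification` above: when `config.problem_type` is unset, the loss function is picked from `num_labels` and the label dtype.

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(num_labels, labels):
    # Mirrors the dispatch above: 1 label -> regression; integer labels -> single-label
    # classification; anything else -> multi-label classification.
    if num_labels == 1:
        return "regression", MSELoss()
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification", CrossEntropyLoss()
    return "multi_label_classification", BCEWithLogitsLoss()

print(pick_loss(3, torch.tensor([0, 2, 1, 0]))[0])  # single_label_classification
print(pick_loss(3, torch.rand(4, 3))[0])            # multi_label_classification
print(pick_loss(1, torch.rand(4))[0])               # regression
```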
4,184
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerForTokenClassification
from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch @auto_docstring class NystromformerForTokenClassification(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class NystromformerForTokenClassification(NystromformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
5
1
31
4
24
3
3
0.09
1
5
2
0
2
4
2
3
69
9
55
26
34
5
22
13
19
5
2
1
6
4,185
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerIntermediate
from ...activations import ACT2FN import torch from torch import nn class NystromformerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class NystromformerIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
4,186
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerLMPredictionHead
from torch import nn import torch class NystromformerLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = NystromformerPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class NystromformerLMPredictionHead(nn.Module): def __init__(self, config): pass def _tie_weights(self): pass def forward(self, hidden_states): pass
4
0
6
1
4
1
1
0.23
1
2
1
0
3
3
3
13
21
5
13
7
9
3
13
7
9
1
1
0
3
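A standalone illustration of the output-head wiring in `NystromformerLMPredictionHead` above: the decoder is a `Linear` created without its own bias, and a separately registered `Parameter` is attached (and re-attached in `_tie_weights`) so the logits become `hidden @ W_decoder.T + bias`.

```python
import torch
from torch import nn

hidden_size, vocab_size = 16, 100
decoder = nn.Linear(hidden_size, vocab_size, bias=False)
bias = nn.Parameter(torch.zeros(vocab_size))
decoder.bias = bias                        # attach the shared bias to the linear layer

hidden = torch.randn(2, hidden_size)
logits = decoder(hidden)                   # hidden @ W.T + bias
print(logits.shape, decoder.bias is bias)  # torch.Size([2, 100]) True
```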
4,187
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerLayer
from ...modeling_layers import GradientCheckpointingLayer from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer class NystromformerLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = NystromformerAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = NystromformerIntermediate(config) self.output = NystromformerOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class NystromformerLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False): pass def feed_forward_chunk(self, attention_output): pass
4
0
8
1
7
0
1
0.05
1
4
3
0
3
6
3
13
27
5
22
16
18
1
20
16
16
1
1
0
3
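A small sketch of the chunked feed-forward used by `NystromformerLayer` above: `apply_chunking_to_forward` splits the input along the sequence dimension, applies the function per chunk, and concatenates the results, matching the unchunked output while lowering peak activation memory.

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

ff = torch.nn.Sequential(torch.nn.Linear(16, 64), torch.nn.GELU(), torch.nn.Linear(64, 16))

def feed_forward_chunk(attention_output):
    return ff(attention_output)

hidden = torch.randn(2, 8, 16)  # (batch, seq, hidden)
chunked = apply_chunking_to_forward(feed_forward_chunk, 4, 1, hidden)  # chunk_size=4, dim=1
full = feed_forward_chunk(hidden)
print(torch.allclose(chunked, full, atol=1e-6))  # True
```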
4,188
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerModel
from ...utils import auto_docstring, logging from typing import Optional, Union from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch @auto_docstring class NystromformerModel(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = NystromformerEmbeddings(config) self.encoder = NystromformerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=sequence_output, 
hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
@auto_docstring class NystromformerModel(NystromformerPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: pass
8
1
20
2
15
2
3
0.15
1
7
3
0
5
3
5
6
109
15
82
31
59
12
42
19
36
12
2
2
17
4,189
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerOnlyMLMHead
from torch import nn import torch class NystromformerOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = NystromformerLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores
class NystromformerOnlyMLMHead(nn.Module): def __init__(self, config): pass def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: pass
3
0
3
0
3
0
1
0
1
3
1
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
4,190
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerOutput
from torch import nn import torch class NystromformerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class NystromformerOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
4,191
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerPreTrainedModel
from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_nystromformer import NystromformerConfig from torch import nn @auto_docstring class NystromformerPreTrainedModel(PreTrainedModel): config: NystromformerConfig base_model_prefix = 'nystromformer' supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
@auto_docstring class NystromformerPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
15
0
12
3
6
0.44
1
0
0
6
1
0
1
1
25
2
16
5
14
7
14
5
12
6
1
2
6
4,192
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerPredictionHeadTransform
from torch import nn import torch from ...activations import ACT2FN class NystromformerPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class NystromformerPredictionHeadTransform(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
7
0
7
0
2
0
1
3
0
0
2
3
2
12
15
1
14
6
11
0
13
6
10
2
1
1
3
4,193
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerSelfAttention
from torch import nn import math import torch class NystromformerSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.num_landmarks = config.num_landmarks self.seq_len = config.segment_means_seq_len self.conv_kernel_size = config.conv_kernel_size if config.inv_coeff_init_option: self.init_option = config['inv_init_coeff_option'] else: self.init_option = 'original' self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.conv_kernel_size is not None: self.conv = nn.Conv2d(in_channels=self.num_attention_heads, out_channels=self.num_attention_heads, kernel_size=(self.conv_kernel_size, 1), padding=(self.conv_kernel_size // 2, 0), bias=False, groups=self.num_attention_heads) def iterative_inv(self, mat, n_iter=6): identity = torch.eye(mat.size(-1), device=mat.device) key = mat if self.init_option == 'original': value = 1 / torch.max(torch.sum(key, dim=-2)) * key.transpose(-1, -2) else: value = 1 / torch.max(torch.sum(key, dim=-2), dim=-1).values[:, :, None, None] * key.transpose(-1, -2) for _ in range(n_iter): key_value = torch.matmul(key, value) value = torch.matmul(0.25 * value, 13 * identity - torch.matmul(key_value, 15 * identity - torch.matmul(key_value, 7 * identity - key_value))) return value def forward(self, hidden_states, attention_mask=None, output_attentions=False): batch_size, seq_length, _ = hidden_states.shape query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) query_layer = query_layer / math.sqrt(math.sqrt(self.attention_head_size)) key_layer = key_layer / math.sqrt(math.sqrt(self.attention_head_size)) if self.num_landmarks == self.seq_len: attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) context_layer = torch.matmul(attention_probs, value_layer) else: q_landmarks = query_layer.reshape(-1, self.num_attention_heads, self.num_landmarks, self.seq_len // self.num_landmarks, self.attention_head_size).mean(dim=-2) k_landmarks = key_layer.reshape(-1, self.num_attention_heads, self.num_landmarks, self.seq_len // self.num_landmarks, self.attention_head_size).mean(dim=-2) kernel_1 = torch.nn.functional.softmax(torch.matmul(query_layer, k_landmarks.transpose(-1, -2)), dim=-1) kernel_2 = torch.nn.functional.softmax(torch.matmul(q_landmarks, k_landmarks.transpose(-1, -2)), dim=-1) 
attention_scores = torch.matmul(q_landmarks, key_layer.transpose(-1, -2)) if attention_mask is not None: attention_scores = attention_scores + attention_mask kernel_3 = nn.functional.softmax(attention_scores, dim=-1) attention_probs = torch.matmul(kernel_1, self.iterative_inv(kernel_2)) new_value_layer = torch.matmul(kernel_3, value_layer) context_layer = torch.matmul(attention_probs, new_value_layer) if self.conv_kernel_size is not None: context_layer += self.conv(value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class NystromformerSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): pass def iterative_inv(self, mat, n_iter=6): pass def forward(self, hidden_states, attention_mask=None, output_attentions=False): pass
4
0
31
6
24
1
4
0.06
1
4
0
0
4
13
4
14
128
25
97
39
92
6
66
39
61
6
1
2
14
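A self-contained numerical sketch of the two ideas in `NystromformerSelfAttention` above, hedged as an illustration rather than the exact model path (single head, no batch, no attention mask, illustrative variable names): landmark kernels replace the full softmax attention matrix, and the `iterative_inv`-style polynomial update approximates the Moore-Penrose pseudo-inverse of the landmark kernel without an explicit `torch.linalg.pinv` call.

```python
import torch

torch.manual_seed(0)
seq_len, num_landmarks, head_dim = 64, 8, 16
q = torch.randn(seq_len, head_dim) / head_dim**0.25  # same 1/sqrt(sqrt(d)) scaling as the model
k = torch.randn(seq_len, head_dim) / head_dim**0.25
v = torch.randn(seq_len, head_dim)

# Landmarks are segment means over seq_len // num_landmarks consecutive tokens.
q_landmarks = q.reshape(num_landmarks, seq_len // num_landmarks, head_dim).mean(dim=1)
k_landmarks = k.reshape(num_landmarks, seq_len // num_landmarks, head_dim).mean(dim=1)

kernel_1 = torch.softmax(q @ k_landmarks.T, dim=-1)            # (seq_len, num_landmarks)
kernel_2 = torch.softmax(q_landmarks @ k_landmarks.T, dim=-1)  # (num_landmarks, num_landmarks)
kernel_3 = torch.softmax(q_landmarks @ k.T, dim=-1)            # (num_landmarks, seq_len)

def iterative_inv(mat, n_iter=6):
    # Same polynomial update as the model code ("original" init), for a single 2-D kernel.
    identity = torch.eye(mat.size(-1))
    value = 1 / torch.max(mat.sum(dim=-2)) * mat.T
    for _ in range(n_iter):
        kv = mat @ value
        value = 0.25 * value @ (13 * identity - kv @ (15 * identity - kv @ (7 * identity - kv)))
    return value

nystrom_out = kernel_1 @ iterative_inv(kernel_2) @ (kernel_3 @ v)
full_out = torch.softmax(q @ k.T, dim=-1) @ v

print("pinv gap:", (iterative_inv(kernel_2) - torch.linalg.pinv(kernel_2)).abs().max().item())
print("attention gap:", (nystrom_out - full_out).abs().mean().item())
```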
4,194
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/nystromformer/modeling_nystromformer.py
transformers.models.nystromformer.modeling_nystromformer.NystromformerSelfOutput
import torch from torch import nn class NystromformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class NystromformerSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
4,195
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/configuration_olmo.py
transformers.models.olmo.configuration_olmo.OlmoConfig
from ...configuration_utils import PretrainedConfig class OlmoConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`OlmoModel`]. It is used to instantiate an OLMo model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [allenai/OLMo-7B-hf](https://huggingface.co/allenai/OLMo-7B-hf). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50304): Vocabulary size of the OLMo model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OlmoModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 50279): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. 
This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. clip_qkv (`float`, *optional*): If not `None`, elements of query, key and value attention states are clipped so that their absolute value does not exceed this value. ```python >>> from transformers import OlmoModel, OlmoConfig >>> # Initializing an OLMo 7B style configuration >>> configuration = OlmoConfig() >>> # Initializing a model from the OLMo 7B style configuration >>> model = OlmoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'olmo' keys_to_ignore_at_inference = ['past_key_values'] base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'} base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])} def __init__(self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, clip_qkv=None, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self._rope_scaling_validation() self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.clip_qkv = clip_qkv super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. """ if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError(f'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}') rope_scaling_type = self.rope_scaling.get('type', None) rope_scaling_factor = self.rope_scaling.get('factor', None) if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']: raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
class OlmoConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`OlmoModel`]. It is used to instantiate an OLMo model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [allenai/OLMo-7B-hf](https://huggingface.co/allenai/OLMo-7B-hf). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50304): Vocabulary size of the OLMo model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OlmoModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 50279): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. 
attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. clip_qkv (`float`, *optional*): If not `None`, elements of query, key and value attention states are clipped so that their absolute value does not exceed this value. ```python >>> from transformers import OlmoModel, OlmoConfig >>> # Initializing an OLMo 7B style configuration >>> configuration = OlmoConfig() >>> # Initializing a model from the OLMo 7B style configuration >>> model = OlmoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, clip_qkv=None, **kwargs): pass def _rope_scaling_validation(self): ''' Validate the `rope_scaling` configuration. ''' pass
3
2
36
2
32
2
4
0.97
1
4
0
1
2
15
2
2
162
14
75
45
50
73
35
23
32
5
1
1
7
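The `_rope_scaling_validation` method in the record above only accepts a two-field dict whose `type` is `'linear'` or `'dynamic'` and whose `factor` is a float greater than 1. A minimal sketch of how that validation behaves at construction time, assuming a recent `transformers` install that exposes `OlmoConfig`:

```python
from transformers import OlmoConfig

# A well-formed rope_scaling dict passes validation when the config is built.
config = OlmoConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

# A factor <= 1.0 (or a non-float) is rejected by _rope_scaling_validation.
try:
    OlmoConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float > 1, got 0.5
```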
4,196
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py
transformers.models.olmo.modeling_olmo.OlmoAttention
import torch.nn as nn from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils.deprecation import deprecate_kwarg import torch import torch.nn.functional as F from typing import Callable, Optional, Union from ...cache_utils import Cache, DynamicCache from .configuration_olmo import OlmoConfig class OlmoAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: OlmoConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim ** (-0.5) self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) if self.config.clip_qkv is not None: query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) query_states = query_states.view(hidden_shape).transpose(1, 2) key_states = key_states.view(hidden_shape).transpose(1, 2) value_states = value_states.view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return (attn_output, attn_weights)
class OlmoAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: OlmoConfig, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]: pass
4
1
40
5
35
1
4
0.03
1
5
2
1
2
11
2
12
83
11
70
31
59
2
41
23
38
6
1
2
7
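Two OLMo-specific details in the attention code above are the optional `clip_qkv` clamp and the grouped-query bookkeeping (`num_key_value_groups`). A minimal sketch of both with plain `torch`, using hypothetical sizes (32 query heads, 8 key/value heads, `clip_qkv=8.0`) rather than any real checkpoint's values:

```python
import torch

hidden_size, num_attention_heads, num_key_value_heads = 4096, 32, 8
head_dim = hidden_size // num_attention_heads                       # 128, as in OlmoAttention.__init__
num_key_value_groups = num_attention_heads // num_key_value_heads   # 4 query heads share each KV head
scaling = head_dim ** -0.5                                          # attention score scale

# clip_qkv clamps the projected query/key/value activations in place.
clip_qkv = 8.0
query_states = torch.randn(2, 5, hidden_size) * 20.0
query_states.clamp_(min=-clip_qkv, max=clip_qkv)
assert query_states.abs().max().item() <= clip_qkv
```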
4,197
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py
transformers.models.olmo.modeling_olmo.OlmoDecoderLayer
from ...cache_utils import Cache, DynamicCache from ...modeling_layers import GradientCheckpointingLayer import torch.nn.functional as F from ...utils.deprecation import deprecate_kwarg from .configuration_olmo import OlmoConfig import torch from ...utils import TransformersKwargs, auto_docstring, can_return_tuple import torch.nn as nn from ...processing_utils import Unpack from typing import Callable, Optional, Union class OlmoDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: OlmoConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = OlmoAttention(config=config, layer_idx=layer_idx) self.mlp = OlmoMLP(config) self.input_layernorm = OlmoLayerNorm(config.hidden_size) self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states
class OlmoDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: OlmoConfig, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor: pass
4
0
25
3
21
2
2
0.07
1
10
6
1
2
5
2
12
51
7
42
22
28
3
21
11
18
2
1
1
3
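The decoder layer above applies the non-parametric `OlmoLayerNorm` before each sub-layer and adds the result back onto a residual. A minimal sketch of that pre-norm residual pattern with stand-in sub-layers (the lambdas below are hypothetical placeholders for the real attention and MLP modules):

```python
import torch
import torch.nn.functional as F

def pre_norm_block(x: torch.Tensor, sublayer) -> torch.Tensor:
    # residual -> norm -> sublayer -> add, mirroring OlmoDecoderLayer.forward
    normed = F.layer_norm(x.to(torch.float32), x.shape[-1:], None, None, eps=1e-5).to(x.dtype)
    return x + sublayer(normed)

hidden = torch.randn(2, 5, 64)
hidden = pre_norm_block(hidden, lambda h: h * 0.5)        # stand-in for self-attention
hidden = pre_norm_block(hidden, lambda h: torch.tanh(h))  # stand-in for the MLP
print(hidden.shape)  # torch.Size([2, 5, 64])
```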
4,198
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py
transformers.models.olmo.modeling_olmo.OlmoForCausalLM
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...cache_utils import Cache, DynamicCache from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...generation import GenerationMixin import torch.nn.functional as F import torch from typing import Callable, Optional, Union from ...processing_utils import Unpack import torch.nn as nn @auto_docstring class OlmoForCausalLM(OlmoPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] _tp_plan = {'lm_head': 'colwise_rep'} _pp_plan = {'lm_head': (['hidden_states'], ['logits'])} def __init__(self, config): super().__init__(config) self.model = OlmoModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast: """ Example: ```python >>> from transformers import AutoTokenizer, OlmoForCausalLM >>> model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf") >>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you." ```""" outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = outputs.last_hidden_state slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class OlmoForCausalLM(OlmoPreTrainedModel, GenerationMixin): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast: ''' Example: ```python >>> from transformers import AutoTokenizer, OlmoForCausalLM >>> model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf") >>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```''' pass
6
1
14
2
9
4
2
0.38
2
9
4
1
8
3
8
9
123
21
74
36
47
28
36
20
27
8
2
1
15
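In `OlmoForCausalLM.forward`, an integer `logits_to_keep` restricts the LM head to the last few positions, which avoids materialising vocabulary-sized logits for the whole sequence during generation. A minimal sketch of just that slicing step, using made-up tensor sizes:

```python
import torch

hidden_states = torch.randn(1, 10, 4096)          # (batch, seq_len, hidden_size)
lm_head = torch.nn.Linear(4096, 50304, bias=False)

logits_to_keep = 1  # keep only the final position, as generation typically does
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = lm_head(hidden_states[:, slice_indices, :])
print(logits.shape)  # torch.Size([1, 1, 50304])
```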
4,199
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py
transformers.models.olmo.modeling_olmo.OlmoLayerNorm
import torch import torch.nn.functional as F import torch.nn as nn class OlmoLayerNorm(nn.Module): """LayerNorm but with no learnable weight or bias.""" def __init__(self, hidden_size: int) -> None: super().__init__() self.normalized_shape = (hidden_size,) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_dtype = hidden_states.dtype return F.layer_norm(hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-05).to(orig_dtype)
class OlmoLayerNorm(nn.Module): '''LayerNorm but with no learnable weight or bias.''' def __init__(self, hidden_size: int) -> None: pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
1
4
0
4
0
1
0.11
1
3
0
0
2
1
2
12
12
2
9
5
6
1
7
5
4
1
1
0
2
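`OlmoLayerNorm` is simply `F.layer_norm` with no learnable weight or bias, computed in float32 and cast back to the input dtype. A minimal sketch that reproduces the same computation without the module wrapper:

```python
import torch
import torch.nn.functional as F

# Half-precision input; the normalisation itself runs in float32 for stability.
x = torch.randn(2, 5, 4096, dtype=torch.float16)
y = F.layer_norm(x.to(torch.float32), (4096,), None, None, eps=1e-5).to(x.dtype)
print(y.dtype, y.shape)  # torch.float16 torch.Size([2, 5, 4096])
```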