Dataset schema, one row per class. For string columns the min/max values are value lengths; ⌀ marks a nullable column.

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

Records follow, one per class. Each record lists its metric values in the schema column order above (total_program_units through SumCyclomatic).
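A minimal sketch of how rows with this schema could be loaded and filtered with pandas. The file name `classes.parquet` is a placeholder (the real dataset location is not given in this dump); the column names come from the schema above.

```python
import pandas as pd

# Hypothetical export of the records below; adjust the path/format as needed.
df = pd.read_parquet("classes.parquet")

# Keep non-trivial classes: more than one program unit and at least one docstring.
subset = df[(df["total_program_units"] > 1) & (df["total_doc_str"] >= 1)]

# Inspect the largest classes by executable lines of code.
print(
    subset.sort_values("CountLineCodeExe", ascending=False)
          .loc[:, ["repository_name", "class_name", "CountLineCode", "SumCyclomatic"]]
          .head(10)
)
```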
id: 3,800
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mistral/modular_mistral.py
class_name: transformers.models.mistral.modular_mistral.MistralForTokenClassification
human_written_code:
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaMLP, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, eager_attention_forward
class MistralForTokenClassification(LlamaForTokenClassification):
pass
class_skeleton:
class MistralForTokenClassification(LlamaForTokenClassification):
pass
metrics (schema order, total_program_units through SumCyclomatic): 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 5, 2, 0, 2, 1, 1, 0, 2, 1, 1, 0, 3, 0, 0

id: 3,801
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mistral/modular_mistral.py
class_name: transformers.models.mistral.modular_mistral.MistralMLP
human_written_code:
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaMLP, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, eager_attention_forward
from torch import nn
class MistralMLP(LlamaMLP):
def __init__(self, config):
super().__init__(config)
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
class_skeleton:
class MistralMLP(LlamaMLP):
def __init__(self, config):
pass
metrics (schema order, total_program_units through SumCyclomatic): 2, 0, 5, 0, 5, 0, 1, 0, 1, 1, 0, 0, 1, 5, 1, 13, 6, 0, 6, 5, 4, 0, 6, 5, 4, 1, 2, 0, 1

id: 3,802
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mistral/modular_mistral.py
class_name: transformers.models.mistral.modular_mistral.MistralModel
human_written_code:
from transformers.utils.generic import check_model_inputs
from typing import Callable, Optional
from ...processing_utils import Unpack
import torch
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...cache_utils import Cache, DynamicCache
from ...modeling_outputs import BaseModelOutputWithPast
from ...utils import TransformersKwargs, auto_docstring, logging
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaMLP, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, eager_attention_forward
class MistralModel(LlamaModel):
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
class_skeleton:
class MistralModel(LlamaModel):
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
metrics (schema order, total_program_units through SumCyclomatic): 4, 0, 51, 2, 36, 12, 6, 0.33, 1, 12, 6, 0, 2, 2, 3, 10, 156, 9, 111, 39, 89, 37, 50, 20, 46, 11, 3, 3, 18

id: 3,803
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/configuration_mixtral.py
class_name: transformers.models.mixtral.configuration_mixtral.MixtralConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
class MixtralConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate an
Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1.
[mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B)
[mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MixtralModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_theta (`float`, *optional*, defaults to 1000000.0):
The base period of the RoPE embeddings.
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `4096`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_experts_per_tok (`int`, *optional*, defaults to 2):
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter
num_local_experts (`int`, *optional*, defaults to 8):
Number of experts per Sparse MLP layer.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss. See [here]() for more details
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
router_jitter_noise (`float`, *optional*, defaults to 0.0):
Amount of noise to add to the router.
```python
>>> from transformers import MixtralModel, MixtralConfig
>>> # Initializing a Mixtral 7B style configuration
>>> configuration = MixtralConfig()
>>> # Initializing a model from the Mixtral 7B style configuration
>>> model = MixtralModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mixtral'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.block_sparse_moe.gate': 'colwise_rep', 'layers.*.block_sparse_moe.experts.*.w1': 'colwise', 'layers.*.block_sparse_moe.experts.*.w2': 'rowwise', 'layers.*.block_sparse_moe.experts.*.w3': 'colwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, head_dim=None, hidden_act='silu', max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=1000000.0, sliding_window=None, attention_dropout=0.0, num_experts_per_tok=2, num_local_experts=8, output_router_logits=False, router_aux_loss_coef=0.001, router_jitter_noise=0.0, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.attention_dropout = attention_dropout
self.head_dim = head_dim
self.num_experts_per_tok = num_experts_per_tok
self.num_local_experts = num_local_experts
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.router_jitter_noise = router_jitter_noise
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class_skeleton:
class MixtralConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate an
Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1.
[mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B)
[mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MixtralModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_theta (`float`, *optional*, defaults to 1000000.0):
The base period of the RoPE embeddings.
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `4096`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_experts_per_tok (`int`, *optional*, defaults to 2):
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter
num_local_experts (`int`, *optional*, defaults to 8):
Number of experts per Sparse MLP layer.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss. See [here]() for more details
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
router_jitter_noise (`float`, *optional*, defaults to 0.0):
Amount of noise to add to the router.
```python
>>> from transformers import MixtralModel, MixtralConfig
>>> # Initializing a Mixtral 7B style configuration
>>> configuration = MixtralConfig()
>>> # Initializing a model from the Mixtral 7B style configuration
>>> model = MixtralModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, head_dim=None, hidden_act='silu', max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=1000000.0, sliding_window=None, attention_dropout=0.0, num_experts_per_tok=2, num_local_experts=8, output_router_logits=False, router_aux_loss_coef=0.001, router_jitter_noise=0.0, **kwargs):
pass
metrics (schema order, total_program_units through SumCyclomatic): 2, 1, 61, 3, 57, 1, 3, 1.11, 1, 1, 0, 0, 1, 20, 1, 1, 160, 13, 70, 52, 41, 78, 28, 25, 26, 3, 1, 1, 3

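The MixtralConfig docstring above explains how `num_key_value_heads` selects MHA, MQA, or grouped-query attention. A minimal shape-only sketch of that grouping arithmetic, using the config defaults (32 attention heads, 8 key/value heads); this is just the math, not the library's own helper.

```python
import torch

num_attention_heads = 32
num_key_value_heads = 8
head_dim = 4096 // num_attention_heads                              # hidden_size // num_attention_heads
num_key_value_groups = num_attention_heads // num_key_value_heads   # 4 query heads share one KV head

batch, seq = 2, 16
q = torch.randn(batch, num_attention_heads, seq, head_dim)
k = torch.randn(batch, num_key_value_heads, seq, head_dim)

# Each KV head is repeated for its group of query heads; afterwards
# queries and keys line up head-for-head.
k_expanded = k.repeat_interleave(num_key_value_groups, dim=1)
assert k_expanded.shape == q.shape
```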
id: 3,804
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralAttention
human_written_code:
import torch
from ...utils.deprecation import deprecate_kwarg
from ...processing_utils import Unpack
import torch.nn.functional as F
from torch import nn
from ...cache_utils import Cache, DynamicCache
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_mixtral import MixtralConfig
from typing import Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
class MixtralAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MixtralConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', None) or config.hidden_size // config.num_attention_heads
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, 'sliding_window', None), **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
class_skeleton:
class MixtralAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: MixtralConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
metrics (schema order, total_program_units through SumCyclomatic): 4, 1, 31, 3, 28, 1, 3, 0.05, 1, 6, 3, 0, 2, 11, 2, 12, 66, 8, 56, 31, 45, 3, 34, 23, 31, 5, 1, 2, 6

id: 3,805
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralBlockSparseTop2MLP
human_written_code:
from .configuration_mixtral import MixtralConfig
from ...activations import ACT2FN
from torch import nn
class MixtralBlockSparseTop2MLP(nn.Module):
def __init__(self, config: MixtralConfig):
super().__init__()
self.ffn_dim = config.intermediate_size
self.hidden_dim = config.hidden_size
self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
current_hidden_states = self.w2(current_hidden_states)
return current_hidden_states
class_skeleton:
class MixtralBlockSparseTop2MLP(nn.Module):
def __init__(self, config: MixtralConfig):
pass
def forward(self, hidden_states):
pass
metrics (schema order, total_program_units through SumCyclomatic): 3, 0, 7, 1, 6, 0, 1, 0, 1, 2, 1, 0, 2, 6, 2, 12, 16, 3, 13, 10, 10, 0, 13, 10, 10, 1, 1, 0, 2

id: 3,806
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralDecoderLayer
human_written_code:
import torch.nn.functional as F
from typing import Callable, Optional, Union
import torch
from ...utils.deprecation import deprecate_kwarg
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache
from .configuration_mixtral import MixtralConfig
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
class MixtralDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MixtralConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MixtralAttention(config, layer_idx)
self.block_sparse_moe = MixtralSparseMoeBlock(config)
self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, _ = self.block_sparse_moe(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
class_skeleton:
class MixtralDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MixtralConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
metrics (schema order, total_program_units through SumCyclomatic): 4, 0, 39, 5, 22, 12, 2, 0.53, 1, 9, 5, 0, 2, 5, 2, 12, 79, 11, 45, 24, 30, 24, 23, 12, 20, 3, 1, 1, 4

id: 3,807
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM
human_written_code:
from ...generation import GenerationMixin
from ...processing_utils import Unpack
import torch.nn.functional as F
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
@auto_docstring
class MixtralForCausalLM(MixtralPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = MixtralModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_local_experts
self.num_experts_per_tok = config.num_experts_per_tok
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_router_logits: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> MoeCausalLMOutputWithPast:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MixtralForCausalLM
>>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
outputs: MoeModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_router_logits=output_router_logits, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(outputs.router_logits, self.num_experts, self.num_experts_per_tok, attention_mask)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device)
return MoeCausalLMOutputWithPast(loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits)
class_skeleton:
@auto_docstring
class MixtralForCausalLM(MixtralPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_router_logits: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> MoeCausalLMOutputWithPast:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MixtralForCausalLM
>>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
metrics (schema order, total_program_units through SumCyclomatic): 6, 1, 17, 2, 11, 4, 3, 0.3, 2, 9, 4, 0, 8, 6, 8, 9, 148, 24, 96, 41, 68, 29, 47, 24, 38, 13, 2, 2, 20

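A small sketch of the `logits_to_keep` slicing used in MixtralForCausalLM.forward above: an integer keeps only the last N positions of the hidden states before the LM head is applied, which is what generation typically needs. Illustrative only, with made-up shapes.

```python
import torch

hidden_states = torch.randn(2, 10, 16)   # (batch, seq, hidden)
logits_to_keep = 1                        # keep only the final position

slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
print(hidden_states[:, slice_indices, :].shape)  # torch.Size([2, 1, 16])
```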
id: 3,808
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralForQuestionAnswering
human_written_code:
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class MixtralForQuestionAnswering(GenericForQuestionAnswering, MixtralPreTrainedModel):
pass
class_skeleton:
class MixtralForQuestionAnswering(GenericForQuestionAnswering, MixtralPreTrainedModel):
pass
metrics (schema order, total_program_units through SumCyclomatic): 1, 0, 18, 2, 13, 3, 2, 0.22, 1, 5, 3, 0, 4, 2, 4, 5, 77, 11, 55, 28, 36, 12, 26, 14, 21, 5, 2, 1, 8

id: 3,809
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralForSequenceClassification
human_written_code:
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class MixtralForSequenceClassification(GenericForSequenceClassification, MixtralPreTrainedModel):
pass
class_skeleton:
class MixtralForSequenceClassification(GenericForSequenceClassification, MixtralPreTrainedModel):
pass
metrics (schema order, total_program_units through SumCyclomatic): 1, 0, 21, 2, 17, 2, 3, 0.11, 1, 7, 3, 0, 4, 3, 4, 5, 90, 11, 71, 31, 53, 8, 36, 18, 31, 9, 2, 1, 12

id: 3,810
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralForTokenClassification
human_written_code:
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class MixtralForTokenClassification(GenericForTokenClassification, MixtralPreTrainedModel):
pass
class_skeleton:
class MixtralForTokenClassification(GenericForTokenClassification, MixtralPreTrainedModel):
pass
metrics (schema order, total_program_units through SumCyclomatic): 1, 0, 17, 1, 14, 2, 3, 0.11, 1, 5, 2, 0, 4, 4, 4, 5, 79, 8, 64, 28, 41, 7, 29, 15, 24, 5, 2, 1, 10

id: 3,811
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralModel
human_written_code:
from .configuration_mixtral import MixtralConfig
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...cache_utils import Cache, DynamicCache
from torch import nn
import torch.nn.functional as F
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.utils.generic import check_model_inputs
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
@auto_docstring
class MixtralModel(MixtralPreTrainedModel):
def __init__(self, config: MixtralConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([MixtralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = MixtralRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
class_skeleton:
@auto_docstring
class MixtralModel(MixtralPreTrainedModel):
def __init__(self, config: MixtralConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> MoeModelOutputWithPast:
pass
metrics (schema order, total_program_units through SumCyclomatic): 6, 0, 47, 5, 36, 7, 7, 0.21, 1, 18, 12, 0, 5, 8, 6, 7, 298, 34, 219, 73, 179, 46, 104, 39, 97, 24, 2, 3, 44

id: 3,812
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralPreTrainedModel
human_written_code:
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils.generic import OutputRecorder
from .configuration_mixtral import MixtralConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
@auto_docstring
class MixtralPreTrainedModel(PreTrainedModel):
config: MixtralConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['MixtralDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {'router_logits': OutputRecorder(MixtralSparseMoeBlock, index=1), 'hidden_states': MixtralDecoderLayer, 'attentions': MixtralAttention}
class_skeleton:
@auto_docstring
class MixtralPreTrainedModel(PreTrainedModel):
pass
metrics (schema order, total_program_units through SumCyclomatic): 2, 0, 10, 0, 10, 0, 5, 0.04, 1, 0, 0, 5, 1, 0, 1, 1, 24, 1, 23, 15, 21, 1, 22, 15, 20, 5, 1, 2, 5

id: 3,813
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralRMSNorm
human_written_code:
from ...integrations import use_kernel_forward_from_hub
import torch.nn.functional as F
from torch import nn
import torch
@use_kernel_forward_from_hub('RMSNorm')
class MixtralRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
MixtralRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
class_skeleton:
@use_kernel_forward_from_hub('RMSNorm')
class MixtralRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
MixtralRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
metrics (schema order, total_program_units through SumCyclomatic): 5, 1, 5, 0, 4, 1, 1, 0.23, 1, 2, 0, 0, 3, 2, 3, 13, 18, 2, 13, 8, 9, 3, 13, 8, 9, 1, 1, 0, 3

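A tiny standalone check of the RMS-normalization formula used by MixtralRMSNorm in the record above: x / sqrt(mean(x^2) + eps), followed by a learned per-channel scale. Illustrative values only.

```python
import torch

eps = 1e-6
hidden_size = 8
x = torch.randn(3, hidden_size)
weight = torch.ones(hidden_size)  # the module initializes its weight to ones

variance = x.pow(2).mean(-1, keepdim=True)
normed = x * torch.rsqrt(variance + eps)
out = weight * normed

# The row-wise root-mean-square of the unscaled output is ~1.
print(normed.pow(2).mean(-1).sqrt())
```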
id: 3,814
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding
human_written_code:
import torch.nn.functional as F
import torch
from .configuration_mixtral import MixtralConfig
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from torch import nn
class MixtralRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: MixtralConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
class_skeleton:
class MixtralRotaryEmbedding(nn.Module):
def __init__(self, config: MixtralConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
metrics (schema order, total_program_units through SumCyclomatic): 5, 0, 18, 2, 13, 5, 3, 0.35, 1, 4, 1, 0, 3, 7, 3, 13, 59, 8, 40, 21, 35, 14, 38, 20, 34, 3, 1, 1, 8

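A sketch of the standard rotary-embedding frequency computation that MixtralRotaryEmbedding delegates to its `rope_init_fn` (the "default" rope type); the exact helper lives in transformers' rope utilities, so this only reproduces the math with the Mixtral defaults.

```python
import torch

rope_theta = 1000000.0   # MixtralConfig default
head_dim = 128

# inv_freq[i] = 1 / theta^(2i / head_dim), one frequency per pair of dimensions.
inv_freq = 1.0 / (rope_theta ** (torch.arange(0, head_dim, 2).float() / head_dim))
position_ids = torch.arange(8).float()            # a short run of positions

freqs = torch.outer(position_ids, inv_freq)        # (seq, head_dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)            # (seq, head_dim), as in forward()
cos, sin = emb.cos(), emb.sin()
print(cos.shape, sin.shape)
```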
id: 3,815
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modeling_mixtral.py
class_name: transformers.models.mixtral.modeling_mixtral.MixtralSparseMoeBlock
human_written_code:
import torch
import torch.nn.functional as F
from torch import nn
class MixtralSparseMoeBlock(nn.Module):
"""
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drop tokens at the cost of reduced performance or (2) set
capacity factor to number of experts and thus waste computation
and memory on padding.
"""
def __init__(self, config):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
self.jitter_noise = config.router_jitter_noise
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
""" """
batch_size, sequence_length, hidden_dim = hidden_states.shape
if self.training and self.jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
hidden_states = hidden_states.view(-1, hidden_dim)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros((batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_layer = self.experts[expert_idx]
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return (final_hidden_states, router_logits)
class_skeleton:
class MixtralSparseMoeBlock(nn.Module):
'''
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drop tokens at the cost of reduced performance or (2) set
capacity factor to number of experts and thus waste computation
and memory on padding.
'''
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
''' '''
pass
metrics (schema order, total_program_units through SumCyclomatic): 3, 2, 27, 5, 16, 7, 2, 0.72, 1, 5, 1, 0, 2, 7, 2, 12, 66, 11, 32, 21, 29, 23, 30, 21, 27, 3, 1, 1, 4

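A minimal sketch of the top-2 routing step that MixtralSparseMoeBlock's forward performs before dispatching tokens to experts: softmax over the router logits, keep the top-k experts per token, and renormalize their weights. Shapes and values here are illustrative.

```python
import torch
import torch.nn.functional as F

num_experts, top_k = 8, 2
tokens = 5
router_logits = torch.randn(tokens, num_experts)

routing_weights = F.softmax(router_logits, dim=-1)
routing_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)

# Each token now carries two expert indices and weights that sum to 1.
print(selected_experts)           # shape (tokens, 2)
print(routing_weights.sum(-1))    # all ones
```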
id: 3,816
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
class_name: transformers.models.mixtral.modular_mixtral.MixtralAttention
human_written_code:
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralAttention(MistralAttention):
pass
class_skeleton:
class MixtralAttention(MistralAttention):
pass
metrics (schema order, total_program_units through SumCyclomatic): 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 12, 2, 0, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0

id: 3,817
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
class_name: transformers.models.mixtral.modular_mixtral.MixtralBlockSparseTop2MLP
human_written_code:
from ...activations import ACT2FN
from .configuration_mixtral import MixtralConfig
from torch import nn
class MixtralBlockSparseTop2MLP(nn.Module):
def __init__(self, config: MixtralConfig):
super().__init__()
self.ffn_dim = config.intermediate_size
self.hidden_dim = config.hidden_size
self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
current_hidden_states = self.w2(current_hidden_states)
return current_hidden_states
class_skeleton:
class MixtralBlockSparseTop2MLP(nn.Module):
def __init__(self, config: MixtralConfig):
pass
def forward(self, hidden_states):
pass
metrics (schema order, total_program_units through SumCyclomatic): 3, 0, 7, 1, 6, 0, 1, 0, 1, 2, 1, 0, 2, 6, 2, 12, 16, 3, 13, 10, 10, 0, 13, 10, 10, 1, 1, 0, 2

id: 3,818
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
class_name: transformers.models.mixtral.modular_mixtral.MixtralDecoderLayer
human_written_code:
from .configuration_mixtral import MixtralConfig
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, logging
from ...utils.deprecation import deprecate_kwarg
from ...processing_utils import Unpack
import torch
from typing import Optional, Union
import torch.nn.functional as F
class MixtralDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MixtralConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MixtralAttention(config, layer_idx)
self.block_sparse_moe = MixtralSparseMoeBlock(config)
self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, _ = self.block_sparse_moe(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
class_skeleton:
class MixtralDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MixtralConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
metrics (schema order, total_program_units through SumCyclomatic): 4, 0, 39, 5, 22, 12, 2, 0.53, 1, 9, 5, 0, 2, 5, 2, 12, 79, 11, 45, 24, 30, 24, 23, 12, 20, 3, 1, 1, 4

id: 3,819
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
class_name: transformers.models.mixtral.modular_mixtral.MixtralForCausalLM
human_written_code:
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
import torch.nn.functional as F
import torch
from ...utils import TransformersKwargs, logging
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from typing import Optional, Union
class MixtralForCausalLM(MistralForCausalLM):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.model = MixtralModel(config)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_local_experts
self.num_experts_per_tok = config.num_experts_per_tok
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_router_logits: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> MoeCausalLMOutputWithPast:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MixtralForCausalLM
>>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
outputs: MoeModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_router_logits=output_router_logits, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(outputs.router_logits, self.num_experts, self.num_experts_per_tok, attention_mask)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device)
return MoeCausalLMOutputWithPast(loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits)
class_skeleton:
class MixtralForCausalLM(MistralForCausalLM):
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_router_logits: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> MoeCausalLMOutputWithPast:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MixtralForCausalLM
>>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
metrics (schema order, total_program_units through SumCyclomatic): 3, 1, 59, 8, 38, 14, 7, 0.36, 1, 8, 3, 0, 2, 4, 2, 11, 121, 17, 77, 31, 58, 28, 31, 15, 28, 13, 3, 2, 14

id: 3,820
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
class_name: transformers.models.mixtral.modular_mixtral.MixtralForQuestionAnswering
human_written_code:
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralForQuestionAnswering(MistralForQuestionAnswering):
pass
|
class MixtralForQuestionAnswering(MistralForQuestionAnswering):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
3,821
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralForSequenceClassification
|
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralForSequenceClassification(MistralForSequenceClassification):
pass
|
class MixtralForSequenceClassification(MistralForSequenceClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
3,822
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralForTokenClassification
|
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralForTokenClassification(MistralForTokenClassification):
pass
|
class MixtralForTokenClassification(MistralForTokenClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
3,823
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralModel
|
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
import torch
from ...cache_utils import Cache, DynamicCache
from typing import Optional, Union
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
import torch.nn.functional as F
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
class MixtralModel(MistralModel):
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
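# Hedged usage sketch for the decoder stack above: the config values are made-up
# small numbers for illustration, not recommended settings.
import torch
from transformers import MixtralConfig, MixtralModel

cfg = MixtralConfig(
    vocab_size=1000, hidden_size=64, intermediate_size=128,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
    num_local_experts=4, num_experts_per_tok=2,
)
model = MixtralModel(cfg).eval()
input_ids = torch.randint(0, cfg.vocab_size, (1, 8))
with torch.no_grad():
    out = model(input_ids=input_ids, use_cache=True)
print(out.last_hidden_state.shape)      # torch.Size([1, 8, 64])
print(out.past_key_values is not None)  # True: the cache is created lazily, as in forward() above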
|
class MixtralModel(MistralModel):
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> MoeModelOutputWithPast:
pass
| 2
| 0
| 61
| 9
| 51
| 2
| 13
| 0.03
| 1
| 10
| 5
| 0
| 2
| 1
| 2
| 9
| 124
| 19
| 102
| 28
| 85
| 3
| 47
| 14
| 44
| 24
| 3
| 2
| 25
|
3,824
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralPreTrainedModel
|
from ...utils.generic import OutputRecorder
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralPreTrainedModel(MistralPreTrainedModel):
_can_compile_fullgraph = False
_can_record_outputs = {'router_logits': OutputRecorder(MixtralSparseMoeBlock, index=1), 'hidden_states': MixtralDecoderLayer, 'attentions': MixtralAttention}
|
class MixtralPreTrainedModel(MistralPreTrainedModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 2
| 0
| 2
| 2
| 1
| 1
| 2
| 2
| 1
| 0
| 2
| 0
| 0
|
3,825
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralRMSNorm
|
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralRMSNorm(MistralRMSNorm):
pass
|
class MixtralRMSNorm(MistralRMSNorm):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
3,826
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralRotaryEmbedding
|
from ..mistral.modeling_mistral import MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding
class MixtralRotaryEmbedding(MistralRotaryEmbedding):
pass
|
class MixtralRotaryEmbedding(MistralRotaryEmbedding):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
3,827
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mixtral/modular_mixtral.py
|
transformers.models.mixtral.modular_mixtral.MixtralSparseMoeBlock
|
from torch import nn
import torch.nn.functional as F
import torch
class MixtralSparseMoeBlock(nn.Module):
"""
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drops tokens at the cost of reduced performance or (2) sets the
capacity factor to the number of experts and thus wastes computation
and memory on padding.
"""
def __init__(self, config):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
self.jitter_noise = config.router_jitter_noise
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
""" """
batch_size, sequence_length, hidden_dim = hidden_states.shape
if self.training and self.jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
hidden_states = hidden_states.view(-1, hidden_dim)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros((batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_layer = self.experts[expert_idx]
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return (final_hidden_states, router_logits)
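# Toy walkthrough of the routing math above (illustrative numbers only): for a single
# token, the gate's softmax scores are reduced to the top-2 experts and renormalized
# before the expert outputs are mixed.
import torch
import torch.nn.functional as F

router_logits = torch.tensor([[1.0, 2.0, 0.5, -1.0]])    # 1 token, 4 experts
weights = F.softmax(router_logits, dim=-1)
top_w, top_idx = torch.topk(weights, k=2, dim=-1)
top_w = top_w / top_w.sum(dim=-1, keepdim=True)           # renormalize over the kept experts
print(top_idx.tolist())  # [[1, 0]] -> experts 1 and 0 are selected
print(top_w.tolist())    # the two mixing coefficients, which sum to 1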
|
class MixtralSparseMoeBlock(nn.Module):
'''
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drops tokens at the cost of reduced performance or (2) sets the
capacity factor to the number of experts and thus wastes computation
and memory on padding.
'''
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
''' '''
pass
| 3
| 2
| 27
| 5
| 16
| 7
| 2
| 0.72
| 1
| 5
| 1
| 0
| 2
| 7
| 2
| 12
| 66
| 11
| 32
| 21
| 29
| 23
| 30
| 21
| 27
| 3
| 1
| 1
| 4
|
3,828
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/configuration_mllama.py
|
transformers.models.mllama.configuration_mllama.MllamaConfig
|
from ...configuration_utils import PretrainedConfig
class MllamaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MllamaForConditionalGeneration`]. It is used to instantiate an
Mllama model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mllama-9B.
e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaTextConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 128256):
The image token index to encode the image prompt.
Example:
```python
>>> from transformers import MllamaForConditionalGeneration, MllamaConfig, MllamaVisionConfig, MllamaTextConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = MllamaVisionConfig()
>>> # Initializing a Llama config
>>> text_config = MllamaTextConfig()
>>> # Initializing a mllama-11b style configuration
>>> configuration = MllamaConfig(vision_config, text_config)
>>> # Initializing a model from the mllama-11b style configuration
>>> model = MllamaForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mllama'
attribute_map = {'image_token_id': 'image_token_index'}
sub_configs = {'text_config': MllamaTextConfig, 'vision_config': MllamaVisionConfig}
def __init__(self, vision_config=None, text_config=None, image_token_index=128256, **kwargs):
if vision_config is None:
self.vision_config = MllamaVisionConfig()
logger.info('vision_config is None, using default mllama vision config')
elif isinstance(vision_config, dict):
self.vision_config = MllamaVisionConfig(**vision_config)
elif isinstance(vision_config, MllamaVisionConfig):
self.vision_config = vision_config
self.image_token_index = image_token_index
if text_config is None:
self.text_config = MllamaTextConfig()
logger.info('text_config is None, using default mllama text config')
elif isinstance(text_config, dict):
self.text_config = MllamaTextConfig(**text_config)
elif isinstance(text_config, MllamaTextConfig):
self.text_config = text_config
super().__init__(**kwargs)
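# Hedged sketch of the dict branches above: sub-configs may be passed as plain dicts
# and are promoted to config objects. The sizes are made-up small values.
from transformers import MllamaConfig

config = MllamaConfig(
    vision_config={"hidden_size": 64, "num_hidden_layers": 2, "num_global_layers": 1, "num_attention_heads": 4},
    text_config={"hidden_size": 64, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 2},
)
print(type(config.vision_config).__name__)  # MllamaVisionConfig
print(config.image_token_index)             # 128256 (default)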
|
class MllamaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MllamaForConditionalGeneration`]. It is used to instantiate an
Mllama model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mllama-9B.
e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaTextConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 128256):
The image token index to encode the image prompt.
Example:
```python
>>> from transformers import MllamaForConditionalGeneration, MllamaConfig, MllamaVisionConfig, MllamaTextConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = MllamaVisionConfig()
>>> # Initializing a Llama config
>>> text_config = MllamaTextConfig()
>>> # Initializing a mllama-11b style configuration
>>> configuration = MllamaConfig(vision_config, text_config)
>>> # Initializing a model from the mllama-11b style configuration
>>> model = MllamaForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vision_config=None, text_config=None, image_token_index=128256, **kwargs):
pass
| 2
| 1
| 26
| 3
| 23
| 0
| 7
| 1.08
| 1
| 4
| 2
| 0
| 1
| 3
| 1
| 1
| 69
| 15
| 26
| 13
| 18
| 28
| 16
| 7
| 14
| 7
| 1
| 1
| 7
|
3,829
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/configuration_mllama.py
|
transformers.models.mllama.configuration_mllama.MllamaTextConfig
|
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
class MllamaTextConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MllamaTextModel`]. It is used to instantiate an
Mllama text model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mllama-11B.
e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128256):
Vocabulary size of the Mllama text model. Defines the maximum number of different tokens that can be represented
by the `inputs_ids` passed when calling [`MllamaTextModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
hidden_act (`str` or `Callable`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the encoder and pooler.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If not
specified, will default to `num_attention_heads`.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
rope_theta (`float`, *optional*, defaults to `500000.0`):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
cross_attention_layers (`list[int]`, *optional*):
Indices of the cross attention layers. If not specified, will default to [3, 8, 13, 18, 23, 28, 33, 38].
dropout (`float`, *optional*, defaults to 0):
The dropout probability for self- and cross-attention layers.
bos_token_id (`int`, *optional*, defaults to 128000):
The id of the beginning of sentence token.
eos_token_id (`int`, *optional*, defaults to 128001):
The id of the end of sentence token.
pad_token_id (`int`, *optional*, defaults to 128004):
The id of the padding token.
Example:
```python
>>> from transformers import MllamaTextModel, MllamaTextConfig
>>> # Initializing a Mllama text config
>>> config = MllamaTextConfig()
>>> # Initializing a model from the Mllama text configuration
>>> model = MllamaTextModel(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mllama_text_model'
base_config_key = 'text_config'
def __init__(self, vocab_size: int=128256, hidden_size: int=4096, hidden_act: str='silu', num_hidden_layers: int=40, num_attention_heads: int=32, num_key_value_heads: int=8, intermediate_size: int=14336, rope_theta: float=500000, rope_scaling: Optional[dict]=None, rms_norm_eps: float=1e-05, max_position_embeddings: int=131072, initializer_range: float=0.02, use_cache: bool=True, tie_word_embeddings: bool=False, cross_attention_layers: Optional[list[int]]=None, dropout: float=0, bos_token_id: int=128000, eos_token_id: int=128001, pad_token_id: Optional[int]=128004, **kwargs):
if cross_attention_layers is None:
cross_attention_layers = [3, 8, 13, 18, 23, 28, 33, 38]
self.vocab_size = vocab_size
self.num_hidden_layers = num_hidden_layers
self.cross_attention_layers = cross_attention_layers
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.initializer_range = initializer_range
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rms_norm_eps = rms_norm_eps
self.intermediate_size = intermediate_size
self.dropout = dropout
self.hidden_act = hidden_act
self.rope_scaling = rope_scaling
self.max_position_embeddings = max_position_embeddings
rope_config_validation(self)
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
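# Hedged sketch of a `rope_scaling` dict, which is checked by rope_config_validation()
# at the end of __init__ above. The factor values are illustrative assumptions only.
from transformers import MllamaTextConfig

text_config = MllamaTextConfig(
    num_hidden_layers=4,            # small model for illustration
    cross_attention_layers=[1, 3],  # override the default cross-attention placement
    rope_scaling={
        "rope_type": "llama3",
        "factor": 8.0,
        "original_max_position_embeddings": 8192,
        "low_freq_factor": 1.0,
        "high_freq_factor": 4.0,
    },
)
print(text_config.rope_scaling["rope_type"])  # "llama3"
print(text_config.cross_attention_layers)     # [1, 3]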
|
class MllamaTextConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MllamaTextModel`]. It is used to instantiate an
Mllama text model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mllama-11B.
e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128256):
Vocabulary size of the Mllama text model. Defines the maximum number of different tokens that can be represented
by the `inputs_ids` passed when calling [`MllamaTextModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
hidden_act (`str` or `Callable`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the encoder and pooler.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If not
specified, will default to `num_attention_heads`.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
rope_theta (`float`, *optional*, defaults to `500000.0`):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
cross_attention_layers (`list[int]`, *optional*):
Indices of the cross attention layers. If not specified, will default to [3, 8, 13, 18, 23, 28, 33, 38].
dropout (`float`, *optional*, defaults to 0):
The dropout probability for self- and cross-attention layers.
bos_token_id (`int`, *optional*, defaults to 128000):
The id of the beginning of sentence token.
eos_token_id (`int`, *optional*, defaults to 128001):
The id of the end of sentence token.
pad_token_id (`int`, *optional*, defaults to 128004):
The id of the padding token.
Example:
```python
>>> from transformers import MllamaTextModel, MllamaTextConfig
>>> # Initializing a Mllama text config
>>> config = MllamaTextConfig()
>>> # Initializing a model from the Mllama text configuration
>>> model = MllamaTextModel(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size: int=128256, hidden_size: int=4096, hidden_act: str='silu', num_hidden_layers: int=40, num_attention_heads: int=32, num_key_value_heads: int=8, intermediate_size: int=14336, rope_theta: float=500000, rope_scaling: Optional[dict]=None, rms_norm_eps: float=1e-05, max_position_embeddings: int=131072, initializer_range: float=0.02, use_cache: bool=True, tie_word_embeddings: bool=False, cross_attention_layers: Optional[list[int]]=None, dropout: float=0, bos_token_id: int=128000, eos_token_id: int=128001, pad_token_id: Optional[int]=128004, **kwargs):
pass
| 2
| 1
| 50
| 2
| 48
| 0
| 2
| 1.82
| 1
| 5
| 0
| 0
| 1
| 15
| 1
| 1
| 156
| 12
| 51
| 41
| 27
| 93
| 23
| 19
| 21
| 2
| 1
| 1
| 2
|
3,830
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/configuration_mllama.py
|
transformers.models.mllama.configuration_mllama.MllamaVisionConfig
|
from typing import Optional
from ...configuration_utils import PretrainedConfig
class MllamaVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MllamaVisionModel`]. It is used to instantiate an
Mllama vision model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mllama-11B.
e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1280):
Dimensionality of the encoder layers and the pooler layer.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_global_layers (`int`, *optional*, defaults to 8):
Number of global layers in the Transformer encoder.
The vision model has a second transformer encoder, called the global encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input image.
intermediate_size (`int`, *optional*, defaults to 5120):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
vision_output_dim (`int`, *optional*, defaults to 7680):
Dimensionality of the vision model output. Includes output of transformer
encoder with intermediate layers and global transformer encoder.
image_size (`int`, *optional*, defaults to 448):
The size (resolution) of each image *tile*.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
max_num_tiles (`int`, *optional*, defaults to 4):
Maximum number of tiles for image splitting.
intermediate_layers_indices (`list[int]`, *optional*, defaults to [3, 7, 15, 23, 30]):
Indices of intermediate layers of transformer encoder from which to extract and output features.
These output features are concatenated with final hidden state of transformer encoder.
supported_aspect_ratios (`list[list[int]]`, *optional*):
List of supported aspect ratios for image splitting. If not specified, the default supported aspect ratios
are [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] for `max_num_tiles=4`.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import MllamaVisionConfig, MllamaVisionModel
>>> # Initializing a Llama config
>>> config = MllamaVisionConfig()
>>> # Initializing a vision model from the mllama-11b style configuration
>>> model = MllamaVisionModel(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mllama_vision_model'
base_config_key = 'vision_config'
def __init__(self, hidden_size: int=1280, hidden_act: str='gelu', num_hidden_layers: int=32, num_global_layers: int=8, num_attention_heads: int=16, num_channels: int=3, intermediate_size: int=5120, vision_output_dim: int=7680, image_size: int=448, patch_size: int=14, norm_eps: float=1e-05, max_num_tiles: int=4, intermediate_layers_indices: Optional[list[int]]=None, supported_aspect_ratios: Optional[list[list[int]]]=None, initializer_range: float=0.02, **kwargs):
if supported_aspect_ratios is None:
if max_num_tiles != 4:
raise ValueError('max_num_tiles must be 4 for default supported aspect ratios')
supported_aspect_ratios = [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]]
if intermediate_layers_indices is None:
intermediate_layers_indices = [3, 7, 15, 23, 30]
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.num_hidden_layers = num_hidden_layers
self.num_channels = num_channels
self.intermediate_size = intermediate_size
self.image_size = image_size
self.vision_output_dim = vision_output_dim
self.patch_size = patch_size
self.intermediate_layers_indices = intermediate_layers_indices
self.num_global_layers = num_global_layers
self.max_num_tiles = max_num_tiles
self.norm_eps = norm_eps
self.attention_heads = num_attention_heads
self.supported_aspect_ratios = supported_aspect_ratios
self.initializer_range = initializer_range
super().__init__(**kwargs)
@property
def max_aspect_ratio_id(self) -> int:
return len(self.supported_aspect_ratios)
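# Quick check of the aspect-ratio bookkeeping above: with the default max_num_tiles=4
# the config materializes 8 supported tile layouts, and max_aspect_ratio_id is their count.
from transformers import MllamaVisionConfig

vision_config = MllamaVisionConfig()
print(vision_config.supported_aspect_ratios)  # [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]]
print(vision_config.max_aspect_ratio_id)      # 8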
|
class MllamaVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MllamaVisionModel`]. It is used to instantiate an
Mllama vision model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Mllama-11B.
e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1280):
Dimensionality of the encoder layers and the pooler layer.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_global_layers (`int`, *optional*, defaults to 8):
Number of global layers in the Transformer encoder.
The vision model has a second transformer encoder, called the global encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input image.
intermediate_size (`int`, *optional*, defaults to 5120):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
vision_output_dim (`int`, *optional*, defaults to 7680):
Dimensionality of the vision model output. Includes output of transformer
encoder with intermediate layers and global transformer encoder.
image_size (`int`, *optional*, defaults to 448):
The size (resolution) of each image *tile*.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
max_num_tiles (`int`, *optional*, defaults to 4):
Maximum number of tiles for image splitting.
intermediate_layers_indices (`list[int]`, *optional*, defaults to [3, 7, 15, 23, 30]):
Indices of intermediate layers of transformer encoder from which to extract and output features.
These output features are concatenated with final hidden state of transformer encoder.
supported_aspect_ratios (`list[list[int]]`, *optional*):
List of supported aspect ratios for image splitting. If not specified, the default supported aspect ratios
are [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] for `max_num_tiles=4`.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import MllamaVisionConfig, MllamaVisionModel
>>> # Initializing a Llama config
>>> config = MllamaVisionConfig()
>>> # Initializing a vision model from the mllama-11b style configuration
>>> model = MllamaVisionModel(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size: int=1280, hidden_act: str='gelu', num_hidden_layers: int=32, num_global_layers: int=8, num_attention_heads: int=16, num_channels: int=3, intermediate_size: int=5120, vision_output_dim: int=7680, image_size: int=448, patch_size: int=14, norm_eps: float=1e-05, max_num_tiles: int=4, intermediate_layers_indices: Optional[list[int]]=None, supported_aspect_ratios: Optional[list[list[int]]]=None, initializer_range: float=0.02, **kwargs):
pass
@property
def max_aspect_ratio_id(self) -> int:
pass
| 4
| 1
| 23
| 1
| 22
| 0
| 3
| 1.13
| 1
| 5
| 0
| 0
| 2
| 15
| 2
| 2
| 113
| 13
| 47
| 39
| 25
| 53
| 28
| 20
| 25
| 4
| 1
| 2
| 5
|
3,831
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/convert_mllama_weights_to_hf.py
|
transformers.models.mllama.convert_mllama_weights_to_hf.MllamaConverter
|
from transformers.convert_slow_tokenizer import TikTokenConverter
from typing import Optional
from transformers import GenerationConfig, MllamaConfig, MllamaForConditionalGeneration, MllamaImageProcessor, PreTrainedTokenizerFast
class MllamaConverter(TikTokenConverter):
def __init__(self, vocab_file, special_tokens: list[str], pattern: str, model_max_length: int, chat_template: Optional[str]=None, **kwargs):
super().__init__(vocab_file, pattern=pattern)
self.additional_special_tokens = special_tokens
tokenizer = self.converted()
if chat_template is not None:
kwargs['chat_template'] = chat_template
self.tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, model_input_names=['input_ids', 'attention_mask'], model_max_length=model_max_length, **kwargs)
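# Hedged usage sketch for the converter above. The vocab path, the split pattern and
# the special-token list are placeholders; the real conversion script derives them
# from the original Llama 3.2 tokenizer release.
converter = MllamaConverter(
    vocab_file="path/to/tokenizer.model",                  # placeholder path
    special_tokens=["<|begin_of_text|>", "<|end_of_text|>", "<|image|>"],
    pattern=r"\p{L}+|\p{N}+|[^\s\p{L}\p{N}]+|\s+",         # simplified placeholder pattern
    model_max_length=131072,
    bos_token="<|begin_of_text|>",
    eos_token="<|end_of_text|>",
)
fast_tokenizer = converter.tokenizer  # a PreTrainedTokenizerFast ready to be saved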
|
class MllamaConverter(TikTokenConverter):
def __init__(self, vocab_file, special_tokens: list[str], pattern: str, model_max_length: int, chat_template: Optional[str]=None, **kwargs):
pass
| 2
| 0
| 20
| 0
| 20
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 1
| 2
| 1
| 5
| 21
| 0
| 21
| 13
| 11
| 0
| 8
| 5
| 6
| 2
| 1
| 1
| 2
|
3,832
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/image_processing_mllama.py
|
transformers.models.mllama.image_processing_mllama.MllamaImageProcessor
|
from ...image_transforms import PaddingMode, get_image_size, pad, resize
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_vision_available, make_nested_list_of_images, to_numpy_array, validate_preprocess_arguments
from typing import Optional, Union
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...utils import TensorType, logging
import numpy as np
class MllamaImageProcessor(BaseImageProcessor):
"""
Constructs a Mllama image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values.
The height and width values should be equal.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch.
max_image_tiles (`int`, *optional*, defaults to 4):
The maximum number of tiles to split the image into.
"""
model_input_names = ['pixel_values', 'num_tiles', 'aspect_ratio_ids', 'aspect_ratio_mask']
def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=True, max_image_tiles: int=4, **kwargs) -> None:
super().__init__(**kwargs)
self.do_convert_rgb = do_convert_rgb
self.do_resize = do_resize
self.size = size if size is not None else {'height': 224, 'width': 224}
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.max_image_tiles = max_image_tiles
_validate_mllama_preprocess_arguments(self.do_resize, self.size, self.do_pad, self.max_image_tiles)
def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, max_image_tiles: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, return_tensors: Optional[Union[str, TensorType]]=None):
"""
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values.
The height and width values should be equal.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether or not to pad the images to the largest height and width in the batch.
max_image_tiles (`int`, *optional*, defaults to `self.max_image_tiles`):
The maximum number of tiles to split the image into.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
Returns:
`BatchFeature` of the following structure:
- **pixel_values** (`TensorType`): The preprocessed pixel values.
- **aspect_ratio_ids** (`TensorType`): The aspect ratio ids of the images.
- **num_tiles** (`list[list[int]]`): The number of tiles for each image in the batch.
"""
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
max_image_tiles = max_image_tiles if max_image_tiles is not None else self.max_image_tiles
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
_validate_mllama_preprocess_arguments(do_resize, size, do_pad, max_image_tiles)
images = self.fetch_images(images)
images_list = make_nested_list_of_images(images)
if self.do_convert_rgb:
images_list = [[convert_to_rgb(image) for image in images] for images in images_list]
batch_images = []
batch_aspect_ratios = []
for images in images_list:
sample_images = []
sample_aspect_ratios = []
for image in images:
if input_data_format is None and isinstance(image, PIL.Image.Image):
input_data_format = ChannelDimension.LAST
image = to_numpy_array(image)
data_format = ChannelDimension.FIRST
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
image, aspect_ratio = self.resize(image=image, size=size, resample=resample, max_image_tiles=max_image_tiles, input_data_format=data_format, data_format=data_format)
image = self.pad(image=image, size=size, aspect_ratio=aspect_ratio, input_data_format=data_format, data_format=data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=data_format, data_format=data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=data_format, data_format=data_format)
num_tiles_height, num_tiles_width = aspect_ratio
image = split_to_tiles(image, num_tiles_height, num_tiles_width)
sample_images.append(image)
sample_aspect_ratios.append((num_tiles_height, num_tiles_width))
batch_images.append(sample_images)
batch_aspect_ratios.append(sample_aspect_ratios)
images, num_tiles = pack_images(batch_images, max_image_tiles)
aspect_ratio_ids = convert_aspect_ratios_to_ids(batch_aspect_ratios, max_image_tiles=max_image_tiles)
aspect_ratio_mask = build_aspect_ratio_mask(batch_aspect_ratios, max_image_tiles=max_image_tiles)
encoded_inputs = BatchFeature(data={'pixel_values': images, 'aspect_ratio_ids': aspect_ratio_ids, 'aspect_ratio_mask': aspect_ratio_mask}, tensor_type=return_tensors)
encoded_inputs['num_tiles'] = num_tiles
return encoded_inputs
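# Hedged usage sketch for the preprocessing pipeline above, using a randomly generated
# PIL image so the resize -> pad -> tile path is exercised end to end.
import numpy as np
from PIL import Image
from transformers import MllamaImageProcessor

processor = MllamaImageProcessor(size={"height": 224, "width": 224}, max_image_tiles=4)
image = Image.fromarray(np.random.randint(0, 255, (300, 600, 3), dtype=np.uint8))
batch = processor(images=[[image]], return_tensors="np")
print(batch["pixel_values"].shape)  # (batch, num_images, max_tiles, channels, 224, 224)
print(batch["aspect_ratio_ids"])    # id of the tile layout chosen for each image
print(batch["num_tiles"])           # e.g. [[2]]: the wide image is split into a 1x2 tile grid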
def pad(self, image: np.ndarray, size: dict[str, int], aspect_ratio: tuple[int, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is
(1, 2), the image will be padded to 224x448.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
aspect_ratio (`tuple[int, int]`):
The aspect ratio of the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The padded image.
"""
_validate_size(size)
image_height, image_width = get_image_size(image, channel_dim=input_data_format)
num_tiles_height, num_tiles_width = aspect_ratio
padded_height = num_tiles_height * size['height']
padded_width = num_tiles_width * size['width']
pad_size = ((0, padded_height - image_height), (0, padded_width - image_width))
image = pad(image, pad_size, mode=PaddingMode.CONSTANT, constant_values=0, data_format=data_format, input_data_format=input_data_format)
return image
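# Worked numbers for the padding rule above (illustrative): assume the resize step
# produced a 224x410 image for a (1, 2) tile layout; it is padded on the bottom/right
# up to the 224x448 canvas, never cropped.
size = {"height": 224, "width": 224}
num_tiles_height, num_tiles_width = 1, 2
image_height, image_width = 224, 410
pad_bottom = num_tiles_height * size["height"] - image_height  # 0
pad_right = num_tiles_width * size["width"] - image_width      # 38
print((pad_bottom, pad_right))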
def resize(self, image: np.ndarray, size: dict[str, int], max_image_tiles: int, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Union[np.ndarray, tuple[int, int]]:
"""
Resizes an image to fit within a tiled canvas while maintaining its aspect ratio.
The optimal canvas size is calculated based on the maximum number of tiles and the tile size.
The function first determines the best tile arrangement for the image, then resizes the image
to fit within this canvas. The resized image and the number of tiles along the height and width
dimensions are returned.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
max_image_tiles (`int`):
The maximum number of tiles to split the image into.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`Union[np.ndarray, tuple[int, int]]`: The resized image and a tuple containing the number of tiles
along the height and width dimensions.
"""
_validate_size(size)
image_height, image_width = get_image_size(image, channel_dim=input_data_format)
tile_size = size['height']
canvas_height, canvas_width = get_optimal_tiled_canvas(image_height=image_height, image_width=image_width, max_image_tiles=max_image_tiles, tile_size=tile_size)
num_tiles_height = canvas_height // tile_size
num_tiles_width = canvas_width // tile_size
new_height, new_width = get_image_size_fit_to_canvas(image_height=image_height, image_width=image_width, canvas_height=canvas_height, canvas_width=canvas_width, tile_size=tile_size)
image = resize(image, (new_height, new_width), resample=resample, data_format=data_format, input_data_format=input_data_format)
return (image, (num_tiles_height, num_tiles_width))
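# Worked numbers for the tiling arithmetic above (illustrative): once the optimal
# canvas is known, the tile grid is plain integer division by the tile size, and the
# image is resized to fit inside that grid while keeping its aspect ratio.
tile_size = 224
canvas_height, canvas_width = 224, 448         # assume a 1x2 canvas was selected
num_tiles_height = canvas_height // tile_size  # 1
num_tiles_width = canvas_width // tile_size    # 2
print((num_tiles_height, num_tiles_width))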
|
class MllamaImageProcessor(BaseImageProcessor):
'''
Constructs a Mllama image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values.
The height and width values should be equal.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch.
max_image_tiles (`int`, *optional*, defaults to 4):
The maximum number of tiles to split the image into.
'''
def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=True, max_image_tiles: int=4, **kwargs) -> None:
pass
def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, max_image_tiles: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, return_tensors: Optional[Union[str, TensorType]]=None):
'''
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values.
The height and width values should be equal.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether or not to pad the images to the largest height and width in the batch.
max_image_tiles (`int`, *optional*, defaults to `self.max_image_tiles`):
The maximum number of tiles to split the image into.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
Returns:
`BatchFeature` of the following structure:
- **pixel_values** (`TensorType`): The preprocessed pixel values.
- **aspect_ratio_ids** (`TensorType`): The aspect ratio ids of the images.
- **num_tiles** (`list[list[int]]`): The number of tiles for each image in the batch.
'''
pass
def pad(self, image: np.ndarray, size: dict[str, int], aspect_ratio: tuple[int, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is
(1, 2), the image will be padded to 224x448.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
aspect_ratio (`tuple[int, int]`):
The aspect ratio of the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The padded image.
'''
pass
def resize(self, image: np.ndarray, size: dict[str, int], max_image_tiles: int, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Union[np.ndarray, tuple[int, int]]:
'''
Resizes an image to fit within a tiled canvas while maintaining its aspect ratio.
The optimal canvas size is calculated based on the maximum number of tiles and the tile size.
The function first determines the best tile arrangement for the image, then resizes the image
to fit within this canvas. The resized image and the number of tiles along the height and width
dimensions are returned.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
max_image_tiles (`int`):
The maximum number of tiles to split the image into.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`Union[np.ndarray, tuple[int, int]]`: The resized image and a tuple containing the number of tiles
along the height and width dimensions.
'''
pass
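# A minimal sketch of the canvas-selection idea described in `resize` above: enumerate the tile
# grids allowed by `max_image_tiles`, score how well the image fits each canvas, and keep the
# best one. The helper name `choose_tile_grid` and the scoring rule are illustrative assumptions,
# not the library's exact algorithm.
def choose_tile_grid(image_height: int, image_width: int, tile_size: int, max_image_tiles: int) -> tuple[int, int]:
    best_grid, best_score = (1, 1), 0.0
    for num_tiles_h in range(1, max_image_tiles + 1):
        for num_tiles_w in range(1, max_image_tiles // num_tiles_h + 1):
            canvas_h, canvas_w = num_tiles_h * tile_size, num_tiles_w * tile_size
            # Largest scale that fits the image inside this canvas without changing its aspect ratio.
            scale = min(canvas_h / image_height, canvas_w / image_width)
            # Prefer the canvas that keeps the image largest, but never reward upscaling past 1.0.
            score = min(scale, 1.0)
            if score > best_score:
                best_grid, best_score = (num_tiles_h, num_tiles_w), score
    return best_grid

# e.g. a 448x1024 image with 224px tiles and at most 4 tiles -> (1, 3) under this scoring rule
print(choose_tile_grid(448, 1024, 224, 4))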
| 5
| 4
| 81
| 9
| 47
| 25
| 6
| 0.69
| 1
| 8
| 3
| 0
| 4
| 11
| 4
| 24
| 361
| 43
| 188
| 85
| 138
| 130
| 79
| 40
| 74
| 17
| 3
| 3
| 23
|
3,833
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaCrossAttentionDecoderLayer
|
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache
import torch
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch.nn.functional as F
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
class MllamaCrossAttentionDecoderLayer(GradientCheckpointingLayer):
"""Cross-attention transformer block with tanh-gated attention and feedforward."""
def __init__(self, config: MllamaTextConfig, layer_idx: int) -> None:
super().__init__()
self.layer_idx = layer_idx
self.cross_attn = MllamaTextCrossAttention(config, layer_idx=layer_idx)
self.input_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.cross_attn_attn_gate = torch.nn.Parameter(torch.zeros(1))
self.mlp = MllamaTextMLP(config)
self.post_attention_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.cross_attn_mlp_gate = torch.nn.Parameter(torch.zeros(1))
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, cross_attention_states: torch.Tensor, cross_attention_mask: torch.Tensor, attention_mask: torch.Tensor, full_text_row_masked_out_mask: tuple[torch.Tensor, torch.Tensor], position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, attn_weights = self.cross_attn(hidden_states=hidden_states, attention_mask=cross_attention_mask, cross_attention_states=cross_attention_states, past_key_values=past_key_values, cache_position=cache_position, **kwargs)
hidden_states = residual + self.cross_attn_attn_gate.tanh() * hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
if full_text_row_masked_out_mask is not None:
hidden_states = full_text_row_masked_out_mask[:, 0] * hidden_states
hidden_states = residual + self.cross_attn_mlp_gate.tanh() * hidden_states
return hidden_states
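# A small check of the zero-init gating used above (a toy sketch, not a test from the library):
# both gates start at 0 and tanh(0) == 0, so at initialization the cross-attention and MLP
# branches contribute nothing and the layer passes `hidden_states` through unchanged; training
# gradually opens the gates.
import torch

gate = torch.zeros(1)
residual = torch.randn(2, 5, 8)
branch_output = torch.randn(2, 5, 8)
out = residual + gate.tanh() * branch_output
assert torch.allclose(out, residual)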
|
class MllamaCrossAttentionDecoderLayer(GradientCheckpointingLayer):
'''Cross-attention transformer block with tanh-gated attention and feedforward.'''
def __init__(self, config: MllamaTextConfig, layer_idx: int) -> None:
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, cross_attention_states: torch.Tensor, cross_attention_mask: torch.Tensor, attention_mask: torch.Tensor, full_text_row_masked_out_mask: tuple[torch.Tensor, torch.Tensor], position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor]:
pass
| 4
| 1
| 27
| 4
| 23
| 1
| 3
| 0.04
| 1
| 8
| 4
| 0
| 2
| 7
| 2
| 12
| 58
| 10
| 47
| 26
| 31
| 2
| 27
| 13
| 24
| 4
| 1
| 1
| 5
|
3,834
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaForCausalLM
|
import torch.nn.functional as F
from ...generation import GenerationMixin
import torch
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from torch import nn
@auto_docstring(custom_intro='\n The Mllama Text Model with a language modeling head on top.\n ')
class MllamaForCausalLM(MllamaPreTrainedModel, GenerationMixin):
config: MllamaTextConfig
_can_compile_fullgraph = True
base_model_prefix = 'language_model'
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config.get_text_config())
self.text_config = config.get_text_config()
self.vocab_size = self.text_config.vocab_size
self.model = MllamaTextModel._from_config(self.text_config)
self.lm_head = nn.Linear(self.text_config.hidden_size, self.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cross_attention_states: Optional[torch.LongTensor]=None, cross_attention_mask: Optional[torch.LongTensor]=None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
"""
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
full_text_row_masked_out_mask (`tuple[torch.Tensor, torch.Tensor]`, *optional*):
A tuple containing two tensors that mask out rows in the cross-attention mechanism:
- The first tensor has shape `(batch_size, 1, seq_length, 1)` and contains values of 0 or 1.
A value of 0 indicates that the corresponding text token's entire row in the cross-attention
matrix should be masked out (all image tokens ignored).
- The second tensor has the same shape and is used internally to apply the masking during
the forward pass of cross-attention layers.
This mask is derived from the cross_attention_mask and is used to handle cases where a text token
should not attend to any image token.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MllamaForCausalLM
>>> model = MllamaForCausalLM.from_pretrained("Llama-3.2-11B-Vision")
>>> tokenizer = AutoTokenizer.from_pretrained("Llama-3.2-11B-Vision")
>>> prompt = "If I had to write a haiku, it would be:"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=40, do_sample=True, temperature=0.6)
>>> result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
>>> print(result)
If I had to write a haiku, it would be: "Snowflakes gently fall" - simple, yet peaceful.
I love the idea of snowflakes gently falling, each one
```
"""
outputs = self.model(input_ids=input_ids, cross_attention_states=cross_attention_states, attention_mask=attention_mask, position_ids=position_ids, cross_attention_mask=cross_attention_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :]).float()
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
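# A toy illustration of the `logits_to_keep` slicing above (sizes are made up): an int n keeps
# only the last n positions before the LM head, which is what incremental decoding needs, while
# the default of 0 keeps every position (slice(-0, None) covers the whole sequence).
import torch

hidden_states = torch.randn(2, 10, 16)  # (batch, seq_len, hidden)
logits_to_keep = 1
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
assert hidden_states[:, slice_indices, :].shape == (2, 1, 16)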
|
@auto_docstring(custom_intro='\n The Mllama Text Model with a language modeling head on top.\n ')
class MllamaForCausalLM(MllamaPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cross_attention_states: Optional[torch.LongTensor]=None, cross_attention_mask: Optional[torch.LongTensor]=None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
'''
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
full_text_row_masked_out_mask (`tuple[torch.Tensor, torch.Tensor]`, *optional*):
A tuple containing two tensors that mask out rows in the cross-attention mechanism:
- The first tensor has shape `(batch_size, 1, seq_length, 1)` and contains values of 0 or 1.
A value of 0 indicates that the corresponding text token's entire row in the cross-attention
matrix should be masked out (all image tokens ignored).
- The second tensor has the same shape and is used internally to apply the masking during
the forward pass of cross-attention layers.
This mask is derived from the cross_attention_mask and is used to handle cases where a text token
should not attend to any image token.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MllamaForCausalLM
>>> model = MllamaForCausalLM.from_pretrained("Llama-3.2-11B-Vision")
>>> tokenizer = AutoTokenizer.from_pretrained("Llama-3.2-11B-Vision")
>>> prompt = "If I had to write a haiku, it would be:"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=40, do_sample=True, temperature=0.6)
>>> result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
>>> print(result)
If I had to write a haiku, it would be: "Snowflakes gently fall" - simple, yet peaceful.
I love the idea of snowflakes gently falling, each one
```
'''
pass
| 6
| 1
| 15
| 2
| 9
| 4
| 2
| 0.37
| 2
| 8
| 3
| 0
| 8
| 4
| 8
| 11
| 132
| 21
| 82
| 42
| 52
| 30
| 39
| 23
| 30
| 8
| 2
| 1
| 15
|
3,835
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaForConditionalGeneration
|
from typing import Callable, Optional, Union
from torch import nn
import torch.nn.functional as F
from ...generation import GenerationMixin
import torch
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
@auto_docstring(custom_intro='\n The Mllama model which consists of a vision encoder and a language model.\n ')
class MllamaForConditionalGeneration(MllamaPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_model': 'model.vision_model', '^multi_modal_projector': 'model.multi_modal_projector', '^language_model.lm_head': 'lm_head'}
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: MllamaConfig):
super().__init__(config)
self.model = MllamaModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
@property
def language_model(self):
return self.model.language_model
@property
def vision_model(self):
return self.model.vision_model
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, aspect_ratio_mask: Optional[torch.Tensor]=None, aspect_ratio_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, cross_attention_states: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
"""
aspect_ratio_mask (`torch.Tensor` of shape `(batch_size, max_num_images, max_num_tiles)`, *optional*):
Mask to avoid performing attention on padding tiles. Mask values selected in `[0, 1]`:
- 1 for tiles that are **not masked**,
- 0 for tiles that are **masked**.
aspect_ratio_ids (`torch.Tensor` of shape `(batch_size, max_num_images)`, *optional*):
Aspect ratio ids used to select the appropriate precomputed tile embeddings based on the aspect ratio of each input image.
These ids correspond to indices in the model's list of supported aspect ratios, offset by 1.
For example, if the model supports aspect ratios [[1, 1], [1, 2], [2, 1]]:
- An image with aspect ratio [1, 1] would have ID 1
- An image with aspect ratio [1, 2] would have ID 2
- An image with aspect ratio [2, 1] would have ID 3
The id 0 is reserved for padding (i.e., no image).
If an image has aspect ratio [1, 2], that means it was split into 2 tiles horizontally, and its `aspect_ratio_id` would be 2.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, MllamaForConditionalGeneration
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaForConditionalGeneration.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> prompt = "<|image|>If I had to write a haiku for this one"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=prompt, images=image, return_tensors="pt")
>>> # Generate
>>> output = model.generate(**inputs, max_new_tokens=15)
>>> prompt_len = inputs.input_ids.shape[-1]
>>> generated_ids = output[:, prompt_len:]
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
>>> print(generated_text)
[', it would be:.\\\\nA stop sign in Chinatown.\\\\n']
```
"""
outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, aspect_ratio_mask=aspect_ratio_mask, aspect_ratio_ids=aspect_ratio_ids, cross_attention_mask=cross_attention_mask, cross_attention_states=cross_attention_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.config.text_config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids=None, inputs_embeds=None, attention_mask=None, position_ids=None, pixel_values=None, aspect_ratio_ids=None, aspect_ratio_mask=None, cross_attention_mask=None, past_key_values=None, use_cache=False, cache_position=None, logits_to_keep=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, use_cache=use_cache, inputs_embeds=inputs_embeds, position_ids=position_ids, attention_mask=attention_mask, pixel_values=pixel_values, aspect_ratio_ids=aspect_ratio_ids, aspect_ratio_mask=aspect_ratio_mask, cross_attention_mask=cross_attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)
if cache_position[0] != 0:
model_inputs['pixel_values'] = None
model_inputs['aspect_ratio_ids'] = None
model_inputs['aspect_ratio_mask'] = None
return model_inputs
def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
cross_attention_mask_prev = model_kwargs.get('cross_attention_mask', None)
model_kwargs = super()._update_model_kwargs_for_generation(outputs=outputs, model_kwargs=model_kwargs, is_encoder_decoder=is_encoder_decoder, **kwargs)
if cross_attention_mask_prev is not None:
model_kwargs['cross_attention_mask'] = torch.cat([cross_attention_mask_prev, cross_attention_mask_prev[:, -1:, ...]], dim=1)
return model_kwargs
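# A minimal sketch of the `aspect_ratio_ids` convention documented in `forward` above: the id is
# the 1-based index of the image's tile arrangement in the model's list of supported aspect
# ratios, with 0 reserved for padding. `supported_aspect_ratios` below is an illustrative value,
# not the model's actual list.
supported_aspect_ratios = [[1, 1], [1, 2], [2, 1], [2, 2], [1, 3], [3, 1], [1, 4], [4, 1]]

def aspect_ratio_to_id(aspect_ratio: list[int]) -> int:
    return supported_aspect_ratios.index(aspect_ratio) + 1

print(aspect_ratio_to_id([1, 2]))  # -> 2, matching the example in the docstring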
|
@auto_docstring(custom_intro='\n The Mllama model which consists of a vision encoder and a language model.\n ')
class MllamaForConditionalGeneration(MllamaPreTrainedModel, GenerationMixin):
def __init__(self, config: MllamaConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
@property
def language_model(self):
pass
@property
def vision_model(self):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, aspect_ratio_mask: Optional[torch.Tensor]=None, aspect_ratio_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, cross_attention_states: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
'''
aspect_ratio_mask (`torch.Tensor` of shape `(batch_size, max_num_images, max_num_tiles)`, *optional*):
Mask to avoid performing attention on padding tiles. Mask values selected in `[0, 1]`:
- 1 for tiles that are **not masked**,
- 0 for tiles that are **masked**.
aspect_ratio_ids (`torch.Tensor` of shape `(batch_size, max_num_images)`, *optional*):
Aspect ratio ids used to select the appropriate precomputed tile embeddings based on the aspect ratio of each input image.
These ids correspond to indices in the model's list of supported aspect ratios, offset by 1.
For example, if the model supports aspect ratios [[1, 1], [1, 2], [2, 1]]:
- An image with aspect ratio [1, 1] would have ID 1
- An image with aspect ratio [1, 2] would have ID 2
- An image with aspect ratio [2, 1] would have ID 3
The id 0 is reserved for padding (i.e., no image).
If an image has aspect ratio [1, 2], that means it was split into 2 tiles horizontally, and its `aspect_ratio_id` would be 2.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, MllamaForConditionalGeneration
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaForConditionalGeneration.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> prompt = "<|image|>If I had to write a haiku for this one"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=prompt, images=image, return_tensors="pt")
>>> # Generate
>>> output = model.generate(**inputs, max_new_tokens=15)
>>> prompt_len = inputs.input_ids.shape[-1]
>>> generated_ids = output[:, prompt_len:]
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
>>> print(generated_text)
[', it would be:.\\nA stop sign in Chinatown.\\n']
```
'''
pass
def prepare_inputs_for_generation(self, input_ids=None, inputs_embeds=None, attention_mask=None, position_ids=None, pixel_values=None, aspect_ratio_ids=None, aspect_ratio_mask=None, cross_attention_mask=None, past_key_values=None, use_cache=False, cache_position=None, logits_to_keep=None, **kwargs):
pass
def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
pass
| 16
| 1
| 25
| 3
| 17
| 5
| 3
| 0.3
| 2
| 9
| 4
| 0
| 10
| 9
| 10
| 13
| 263
| 39
| 175
| 61
| 127
| 53
| 79
| 26
| 68
| 11
| 2
| 2
| 31
|
3,836
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaPreTrainedModel
|
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from ...modeling_attn_mask_utils import AttentionMaskConverter
import torch.nn.functional as F
from ...utils.generic import OutputRecorder, check_model_inputs
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...cache_utils import Cache, DynamicCache
@auto_docstring
class MllamaPreTrainedModel(PreTrainedModel):
config: MllamaConfig
base_model_prefix = ''
supports_gradient_checkpointing = True
_no_split_modules = ['MllamaVisionEncoderLayer', 'MllamaCrossAttentionDecoderLayer', 'MllamaSelfAttentionDecoderLayer']
_can_compile_fullgraph = False
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': [MllamaSelfAttentionDecoderLayer, MllamaCrossAttentionDecoderLayer], 'attentions': [OutputRecorder(MllamaTextSelfAttention, index=1, layer_name='self_attn'), OutputRecorder(MllamaTextSelfAttention, index=1, layer_name='cross_attn'), OutputRecorder(MllamaTextCrossAttention, index=1, layer_name='cross_attn')]}
def _init_weights(self, module):
std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range)
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, MllamaTextRMSNorm):
module.weight.data.fill_(1.0)
elif isinstance(module, MllamaVisionModel):
nn.init.normal_(module.class_embedding.data, std=std)
elif isinstance(module, MllamaPrecomputedPositionEmbedding):
nn.init.normal_(module.embedding.data, std=std)
nn.init.zeros_(module.gate.data)
elif isinstance(module, MllamaVisionEncoderLayer) and module.is_gated:
nn.init.normal_(module.gate_attn.data, std=std)
nn.init.normal_(module.gate_ffn.data, std=std)
elif isinstance(module, MllamaCrossAttentionDecoderLayer):
module.cross_attn_attn_gate.data.zero_()
module.cross_attn_mlp_gate.data.zero_()
elif isinstance(module, MllamaPrecomputedAspectRatioEmbedding):
if module.is_gated:
module.gate.data.zero_()
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
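# A small worked example of `_prepare_4d_causal_attention_mask_with_cache_position` (toy sizes,
# float32, CPU): with sequence_length=2, target_length=4 and one token already in the cache, each
# query may attend to the cached position and to itself, and every disallowed entry holds the
# dtype minimum.
import torch

mask = MllamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=torch.ones(1, 4),
    sequence_length=2,
    target_length=4,
    dtype=torch.float32,
    cache_position=torch.tensor([1, 2]),  # the two new tokens sit at absolute positions 1 and 2
    batch_size=1,
)
print(mask.shape)  # torch.Size([1, 1, 2, 4])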
|
@auto_docstring
class MllamaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
| 6
| 1
| 46
| 3
| 33
| 10
| 7
| 0.3
| 1
| 9
| 6
| 4
| 2
| 0
| 3
| 3
| 156
| 11
| 113
| 41
| 92
| 34
| 59
| 24
| 55
| 9
| 1
| 2
| 22
|
3,837
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaPrecomputedAspectRatioEmbedding
|
from torch import nn
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
import torch.nn.functional as F
class MllamaPrecomputedAspectRatioEmbedding(nn.Module):
def __init__(self, config: MllamaVisionConfig, is_gated: bool=True):
super().__init__()
self.max_num_tiles = config.max_num_tiles
self.hidden_size = config.hidden_size
self.max_aspect_ratio_id = config.max_aspect_ratio_id
self.is_gated = is_gated
self.embedding = nn.Embedding(self.max_aspect_ratio_id + 1, self.max_num_tiles * self.hidden_size)
if is_gated:
self.gate = nn.Parameter(torch.zeros(1))
def forward(self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor) -> torch.Tensor:
embeddings = self.embedding(aspect_ratio_ids)
embeddings = embeddings.reshape(-1, self.max_num_tiles, 1, self.hidden_size)
if self.is_gated:
embeddings = embeddings * self.gate.tanh()
hidden_state = hidden_state + embeddings
return hidden_state
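# A toy shape check for the gated aspect-ratio embedding above (illustrative sizes only): the
# per-tile embedding is reshaped to (-1, max_num_tiles, 1, hidden_size) so it broadcasts over the
# patch dimension of `hidden_state`.
import torch

max_num_tiles, num_patches, hidden_size = 4, 3, 8
hidden_state = torch.randn(2, max_num_tiles, num_patches, hidden_size)
embeddings = torch.randn(2, max_num_tiles * hidden_size).reshape(-1, max_num_tiles, 1, hidden_size)
print((hidden_state + embeddings).shape)  # torch.Size([2, 4, 3, 8])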
|
class MllamaPrecomputedAspectRatioEmbedding(nn.Module):
def __init__(self, config: MllamaVisionConfig, is_gated: bool=True):
pass
def forward(self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 10
| 2
| 8
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 6
| 2
| 12
| 21
| 4
| 17
| 10
| 14
| 0
| 17
| 10
| 14
| 2
| 1
| 1
| 4
|
3,838
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaPrecomputedPositionEmbedding
|
from torch import nn
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
import torch.nn.functional as F
class MllamaPrecomputedPositionEmbedding(nn.Module):
def __init__(self, config: MllamaVisionConfig):
super().__init__()
self.max_num_tiles = config.max_num_tiles
self.max_aspect_ratio_id = config.max_aspect_ratio_id
self.num_patches = (config.image_size // config.patch_size) ** 2 + 1
self.hidden_size = config.hidden_size
self.scale = config.hidden_size ** (-0.5)
self.gate = nn.Parameter(torch.zeros(1))
position_embedding = torch.randn(self.num_patches, self.hidden_size)
self.embedding = nn.Parameter(self.scale * position_embedding)
self.tile_embedding = nn.Embedding(self.max_aspect_ratio_id + 1, self.max_num_tiles * self.num_patches * self.hidden_size)
def forward(self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor) -> torch.Tensor:
gated_position_embedding = (1 - self.gate.tanh()) * self.embedding
hidden_state = hidden_state + gated_position_embedding.view(1, 1, self.num_patches, self.hidden_size)
tile_position_embedding = self.tile_embedding(aspect_ratio_ids)
batch_size = hidden_state.shape[0]
tile_position_embedding = tile_position_embedding.reshape(batch_size, self.max_num_tiles, self.num_patches, self.hidden_size)
gated_tile_position_embedding = self.gate.tanh() * tile_position_embedding
hidden_state = hidden_state + gated_tile_position_embedding
return hidden_state
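# A small sketch of the gating above (toy values): with the gate initialized to zero,
# (1 - tanh(0)) == 1 and tanh(0) == 0, so at initialization only the ordinary position embedding
# is added and the per-tile embedding is switched off; training can then shift weight between the
# two terms through the single gate parameter.
import torch

gate = torch.zeros(1)
print(1 - gate.tanh(), gate.tanh())  # tensor([1.]) tensor([0.])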
|
class MllamaPrecomputedPositionEmbedding(nn.Module):
def __init__(self, config: MllamaVisionConfig):
pass
def forward(self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 17
| 3
| 12
| 2
| 1
| 0.16
| 1
| 3
| 1
| 0
| 2
| 8
| 2
| 12
| 35
| 6
| 25
| 16
| 22
| 4
| 21
| 16
| 18
| 1
| 1
| 0
| 2
|
3,839
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaRotaryEmbedding
|
from torch import nn
import torch.nn.functional as F
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
class MllamaRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: MllamaTextConfig, device=None):
super().__init__()
self.rope_type = config.rope_scaling['rope_type']
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
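# A sketch of what the default rope initialization typically produces (the standard RoPE inverse
# frequencies; treat the exact values as an assumption, since `rope_init_fn` is looked up in
# ROPE_INIT_FUNCTIONS by `rope_type`): for head dimension `dim` and base `theta`,
# inv_freq[i] = 1 / theta**(2i / dim), giving dim // 2 frequencies, and the forward pass returns
# cos/sin tables of shape (batch, seq_len, dim).
import torch

dim, theta = 8, 10_000.0
inv_freq = 1.0 / theta ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)
position_ids = torch.arange(6)[None, :]  # (batch=1, seq_len=6)
freqs = position_ids[:, :, None].float() * inv_freq[None, None, :]  # (1, 6, dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)
print(emb.cos().shape, emb.sin().shape)  # torch.Size([1, 6, 8]) twice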
|
class MllamaRotaryEmbedding(nn.Module):
def __init__(self, config: MllamaTextConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 16
| 2
| 12
| 4
| 2
| 0.29
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 53
| 7
| 38
| 21
| 33
| 11
| 35
| 20
| 31
| 3
| 1
| 1
| 7
|
3,840
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaSelfAttentionDecoderLayer
|
from typing import Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
from ...modeling_flash_attention_utils import FlashAttentionKwargs
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
class MllamaSelfAttentionDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MllamaTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MllamaTextSelfAttention(config=config, layer_idx=layer_idx)
self.mlp = MllamaTextMLP(config)
self.input_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
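# A compact restatement of the pre-norm residual pattern used twice in `forward` above (toy
# modules standing in for the attention block, the MLP and the RMSNorm layers): normalize,
# transform, then add the untouched residual back.
import torch
from torch import nn

norm, block = nn.LayerNorm(8), nn.Linear(8, 8)
x = torch.randn(2, 5, 8)
out = x + block(norm(x))
print(out.shape)  # torch.Size([2, 5, 8])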
|
class MllamaSelfAttentionDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MllamaTextConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model.
'''
pass
| 4
| 1
| 40
| 5
| 23
| 13
| 2
| 0.54
| 1
| 8
| 4
| 0
| 2
| 6
| 2
| 12
| 81
| 11
| 46
| 25
| 30
| 25
| 24
| 12
| 21
| 3
| 1
| 1
| 4
|
3,841
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaTextCrossAttention
|
from typing import Callable, Optional, Union
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
from ...cache_utils import Cache, DynamicCache
class MllamaTextCrossAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Optional[MllamaTextConfig]=None, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.num_heads = self.config.num_attention_heads
self.num_key_value_heads = self.config.num_key_value_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.head_dim = config.hidden_size // self.num_heads
self.layer_idx = layer_idx
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.q_norm = MllamaTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = MllamaTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = self.q_norm(query_states)
if cross_attention_states is not None:
key_states = self.k_proj(cross_attention_states)
value_states = self.v_proj(cross_attention_states)
key_states = key_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2)
key_states = self.k_norm(key_states)
if past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
elif cache_position[0] != 0:
key_states, value_states = (past_key_values.layers[self.layer_idx].keys, past_key_values.layers[self.layer_idx].values)
else:
raise ValueError("Cross attention layer can't find neither `cross_attn_states` nor cached values for key/values!")
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
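# A minimal sketch of the grouped-query layout implied by `num_key_value_groups` above (toy
# sizes, not the library's eager_attention_forward): key/value states carry fewer heads than the
# queries and are repeated `num_key_value_groups` times along the head dimension before the usual
# scaled-dot-product attention.
import torch

bsz, q_len, kv_len, head_dim = 1, 3, 5, 4
num_heads, num_key_value_heads = 8, 2
num_key_value_groups = num_heads // num_key_value_heads

query = torch.randn(bsz, num_heads, q_len, head_dim)
key = torch.randn(bsz, num_key_value_heads, kv_len, head_dim)
key = key.repeat_interleave(num_key_value_groups, dim=1)  # -> (bsz, num_heads, kv_len, head_dim)
attn_weights = query @ key.transpose(2, 3) * head_dim**-0.5
print(attn_weights.shape)  # torch.Size([1, 8, 3, 5])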
|
class MllamaTextCrossAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Optional[MllamaTextConfig]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 40
| 5
| 34
| 2
| 4
| 0.07
| 1
| 8
| 3
| 1
| 2
| 14
| 2
| 12
| 84
| 11
| 69
| 37
| 53
| 5
| 47
| 24
| 44
| 6
| 1
| 2
| 7
|
3,842
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaTextMLP
|
from torch import nn
from ...activations import ACT2FN
class MllamaTextMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
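# A toy walkthrough of the gated MLP above (illustrative sizes; `silu` stands in for whatever
# activation `config.hidden_act` selects): the gate and up projections expand to the intermediate
# size, are combined elementwise, and the down projection maps back to hidden_size.
import torch
import torch.nn.functional as F

hidden_size, intermediate_size = 8, 32
x = torch.randn(2, 5, hidden_size)
gate = torch.randn(hidden_size, intermediate_size)
up = torch.randn(hidden_size, intermediate_size)
down = torch.randn(intermediate_size, hidden_size)
out = (F.silu(x @ gate) * (x @ up)) @ down
print(out.shape)  # torch.Size([2, 5, 8])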
|
class MllamaTextMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 7
| 0
| 6
| 1
| 1
| 0.08
| 1
| 1
| 0
| 0
| 2
| 7
| 2
| 12
| 15
| 1
| 13
| 11
| 10
| 1
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
3,843
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py
|
transformers.models.mllama.modeling_mllama.MllamaTextModel
|
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from ...utils.generic import OutputRecorder, check_model_inputs
from typing import Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from torch import nn
@auto_docstring(custom_intro='\n The Mllama Text Model which consists of transformer with self and cross attention layers.\n ')
class MllamaTextModel(MllamaPreTrainedModel):
config: MllamaTextConfig
base_model_prefix = 'language_model.model'
def __init__(self, config: MllamaTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size + 8, config.hidden_size, self.padding_idx)
self.cross_attention_layers = config.cross_attention_layers
layers = []
for layer_idx in range(config.num_hidden_layers):
if layer_idx in self.cross_attention_layers:
layers.append(MllamaCrossAttentionDecoderLayer(config, layer_idx))
else:
layers.append(MllamaSelfAttentionDecoderLayer(config, layer_idx))
self.layers = nn.ModuleList(layers)
self.norm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = MllamaRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cross_attention_states: Optional[torch.FloatTensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> BaseModelOutputWithPast:
"""
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
full_text_row_masked_out_mask (`tuple[torch.Tensor, torch.Tensor]`, *optional*):
A tuple containing two tensors that mask out rows in the cross-attention mechanism:
- The first tensor has shape `(batch_size, 1, seq_length, 1)` and contains values of 0 or 1.
A value of 0 indicates that the corresponding text token's entire row in the cross-attention
matrix should be masked out (all image tokens ignored).
- The second tensor has the same shape and is used internally to apply the masking during
the forward pass of cross-attention layers.
This mask is derived from the cross_attention_mask and is used to handle cases where a text token
should not attend to any image token.
Example:
```python
>>> from transformers import AutoProcessor, MllamaTextModel
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaTextModel.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> text = "<|image|>If I had to write a haiku for this one"
>>> inputs = processor(text=text, return_tensors="pt")
>>> output = model(**inputs)
>>> print(output.last_hidden_state.shape)
torch.Size([1, 13, 4096])
```
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for idx, decoder_layer in enumerate(self.layers):
is_cross_attention_layer = idx in self.cross_attention_layers
is_cross_attention_cache_empty = past_key_values is None or (past_key_values is not None and past_key_values.get_seq_length(idx) == 0)
if is_cross_attention_layer and cross_attention_states is None and is_cross_attention_cache_empty:
continue
hidden_states = decoder_layer(hidden_states, cross_attention_states=cross_attention_states, cross_attention_mask=cross_attention_mask, attention_mask=causal_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
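A small sketch of the layer-interleaving rule in `__init__` above, using plain strings instead of real decoder modules; the layer count and cross-attention indices below are hypothetical, not taken from any real config:
```python
# Reproduce the interleaving rule from MllamaTextModel.__init__ with placeholder strings.
num_hidden_layers = 8
cross_attention_layers = [3, 7]  # hypothetical indices; the real value comes from the config

layers = []
for layer_idx in range(num_hidden_layers):
    if layer_idx in cross_attention_layers:
        layers.append(f"{layer_idx}: cross-attention layer")  # attends to vision states
    else:
        layers.append(f"{layer_idx}: self-attention layer")   # regular causal self-attention

print("\n".join(layers))
# During forward, a cross-attention layer is skipped entirely when no
# cross_attention_states are passed and its cache entry is still empty.
```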
@auto_docstring(custom_intro='\n The Mllama Text Model which consists of transformer with self and cross attention layers.\n ')
class MllamaTextModel(MllamaPreTrainedModel):
def __init__(self, config: MllamaTextConfig):
pass
@check_model_inputs
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cross_attention_states: Optional[torch.FloatTensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> BaseModelOutputWithPast:
'''
cross_attention_states (`torch.FloatTensor`, *optional*):
Output of the vision model, used for cross-attention. This tensor contains the processed image features that
the language model will attend to.
cross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):
Cross-attention mask to control the interaction between text tokens and image tiles.
This 4D tensor defines which image tiles each text token should attend to.
For each text token (in seq_length):
- 1 indicates the token **should attend** to the corresponding image tile
- 0 indicates the token **should not attend** to the corresponding image tile
full_text_row_masked_out_mask (`tuple[torch.Tensor, torch.Tensor]`, *optional*):
A tuple containing two tensors that mask out rows in the cross-attention mechanism:
- The first tensor has shape `(batch_size, 1, seq_length, 1)` and contains values of 0 or 1.
A value of 0 indicates that the corresponding text token's entire row in the cross-attention
matrix should be masked out (all image tokens ignored).
- The second tensor has the same shape and is used internally to apply the masking during
the forward pass of cross-attention layers.
This mask is derived from the cross_attention_mask and is used to handle cases where a text token
should not attend to any image token.
Example:
```python
>>> from transformers import AutoProcessor, MllamaTextModel
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaTextModel.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> text = "<|image|>If I had to write a haiku for this one"
>>> inputs = processor(text=text, return_tensors="pt")
>>> output = model(**inputs)
>>> print(output.last_hidden_state.shape)
torch.Size([1, 13, 4096])
```
'''
pass
| 7 | 1 | 44 | 7 | 31 | 5 | 7 | 0.16 | 1 | 15 | 8 | 0 | 4 | 8 | 4 | 7 | 183 | 33 | 129 | 45 | 107 | 21 | 70 | 29 | 65 | 24 | 2 | 2 | 29 |
3,844 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaTextRMSNorm |
from torch import nn
import torch
import torch.nn.functional as F
class MllamaTextRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
MllamaTextRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
|
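A toy numeric check of the formula above: each vector is divided by its root-mean-square (plus a small epsilon) and then rescaled by the learned weight, so a freshly initialized norm (weight of ones) produces rows with unit RMS. This sketch just re-applies the same operations outside the module:
```python
import torch

# Recompute the RMSNorm formula from the class above on a toy tensor.
hidden_states = torch.tensor([[3.0, 4.0]])   # RMS = sqrt((9 + 16) / 2) = sqrt(12.5)
eps = 1e-6
weight = torch.ones(2)                        # freshly initialized weight is all ones

variance = hidden_states.pow(2).mean(-1, keepdim=True)
normalized = hidden_states * torch.rsqrt(variance + eps)
out = weight * normalized

print(out)                           # ~[[0.8485, 1.1314]]
print(normalized.pow(2).mean(-1))    # ~[1.0] -> each row now has unit RMS
```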
class MllamaTextRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
MllamaTextRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4 | 1 | 5 | 0 | 4 | 1 | 1 | 0.23 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 13 | 18 | 2 | 13 | 8 | 9 | 3 | 13 | 8 | 9 | 1 | 1 | 0 | 3 |
3,845 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaTextSelfAttention |
from typing import Callable, Optional, Union
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
class MllamaTextSelfAttention(nn.Module):
def __init__(self, config: MllamaTextConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor, use_cache: bool=False, past_key_values=None, cache_position=None, **kwargs):
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
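A shape-only sketch of the grouped-query bookkeeping above, with small made-up dimensions: queries keep `num_heads` heads while keys and values use the smaller `num_key_value_heads`, and each key/value head ends up shared by `num_key_value_groups` query heads inside the attention kernel. The repeat below only illustrates that sharing; it is not the library's exact helper:
```python
import torch

# Hypothetical sizes, chosen only so the shapes are easy to read.
bsz, q_len = 2, 5
hidden_size, num_heads, num_key_value_heads = 32, 8, 2
head_dim = hidden_size // num_heads                       # 4
num_key_value_groups = num_heads // num_key_value_heads   # 4 query heads per KV head

q = torch.randn(bsz, q_len, num_heads * head_dim)
k = torch.randn(bsz, q_len, num_key_value_heads * head_dim)

q = q.view(bsz, q_len, num_heads, head_dim).transpose(1, 2)            # (2, 8, 5, 4)
k = k.view(bsz, q_len, num_key_value_heads, head_dim).transpose(1, 2)  # (2, 2, 5, 4)

# Sharing illustrated by repeating each KV head for its group of query heads.
k_shared = k.repeat_interleave(num_key_value_groups, dim=1)            # (2, 8, 5, 4)
print(q.shape, k.shape, k_shared.shape)
```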
class MllamaTextSelfAttention(nn.Module):
def __init__(self, config: MllamaTextConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor, use_cache: bool=False, past_key_values=None, cache_position=None, **kwargs):
pass
| 4 | 0 | 34 | 7 | 27 | 2 | 3 | 0.06 | 1 | 5 | 1 | 1 | 2 | 13 | 2 | 12 | 70 | 14 | 54 | 35 | 41 | 3 | 44 | 25 | 41 | 4 | 1 | 1 | 5 |
3,846 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaVisionAttention |
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
class MllamaVisionAttention(nn.Module):
def __init__(self, config: MllamaVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.attention_heads
self.head_dim = config.hidden_size // config.attention_heads
self.scaling = self.head_dim ** (-0.5)
self.num_key_value_groups = 1
self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.embed_dim, bias=False)
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
query = self.q_proj(hidden_state)
key = self.k_proj(hidden_state)
value = self.v_proj(hidden_state)
batch_size, q_seq_len, _ = query.shape
_, kv_seq_len, _ = key.shape
query = query.view(batch_size, q_seq_len, self.num_heads, self.head_dim).transpose(1, 2)
key = key.view(batch_size, kv_seq_len, self.num_heads, self.head_dim).transpose(1, 2)
value = value.view(batch_size, kv_seq_len, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query, key, value, attention_mask, dropout=0.0, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(batch_size, q_seq_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class MllamaVisionAttention(nn.Module):
def __init__(self, config: MllamaVisionConfig):
pass
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
| 3 | 0 | 24 | 6 | 18 | 1 | 2 | 0.06 | 1 | 4 | 1 | 1 | 2 | 7 | 2 | 12 | 49 | 12 | 36 | 24 | 28 | 2 | 31 | 19 | 28 | 3 | 1 | 1 | 4 |
3,847 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaVisionEncoder |
from typing import Callable, Optional, Union
from torch import nn
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast
class MllamaVisionEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`MllamaEncoderLayer`].
Args:
config: MllamaConfig
"""
def __init__(self, config: MllamaVisionConfig, num_layers=32, is_gated=False):
super().__init__()
self.config = config
self.layers = nn.ModuleList([MllamaVisionEncoderLayer(config, is_gated) for _ in range(num_layers)])
self.gradient_checkpointing = False
self.config = config
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> BaseModelOutput:
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
encoder_states = ()
for encoder_layer in self.layers:
hidden_states = encoder_layer(hidden_state=hidden_states, attention_mask=attention_mask)
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states)
|
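The forward above keeps the output of every layer in a tuple so that intermediate states can be picked out later by index (as the vision model does with `intermediate_layers_indices`). A tiny stand-in sketch of that collection pattern, with dummy layers and hypothetical indices:
```python
# Stand-in "layers": each just adds 1, so the collected states are easy to inspect.
layers = [lambda x: x + 1 for _ in range(4)]

hidden_states = 0
encoder_states = ()
for layer in layers:
    hidden_states = layer(hidden_states)
    encoder_states = encoder_states + (hidden_states,)   # one entry per layer output

print(encoder_states)                  # (1, 2, 3, 4)
intermediate_layers_indices = [1, 3]   # hypothetical selection, like the vision model's config
print([encoder_states[i] for i in intermediate_layers_indices])  # [2, 4]
```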
class MllamaVisionEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`MllamaEncoderLayer`].
Args:
config: MllamaConfig
'''
def __init__(self, config: MllamaVisionConfig, num_layers=32, is_gated=False):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> BaseModelOutput:
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
'''
pass
| 3 | 2 | 38 | 4 | 24 | 10 | 7 | 0.54 | 1 | 8 | 3 | 0 | 2 | 3 | 2 | 12 | 85 | 11 | 48 | 17 | 38 | 26 | 27 | 10 | 24 | 12 | 1 | 2 | 13 |
3,848 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaVisionEncoderLayer |
from typing import Callable, Optional, Union
import math
from torch import nn
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
class MllamaVisionEncoderLayer(nn.Module):
def __init__(self, config: MllamaVisionConfig, is_gated: bool=False):
super().__init__()
self.hidden_size = config.hidden_size
self.num_attention_heads = config.attention_heads
self.is_gated = is_gated
self.intermediate_size = config.intermediate_size
self.self_attn = MllamaVisionAttention(config)
self.mlp = MllamaVisionMLP(config)
self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=config.norm_eps)
self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=config.norm_eps)
if is_gated:
self.gate_attn = nn.Parameter(torch.ones(1) * math.pi / 4)
self.gate_ffn = nn.Parameter(torch.ones(1) * math.pi / 4)
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None):
residual = hidden_state
hidden_state = self.input_layernorm(hidden_state)
hidden_state, attn_weights = self.self_attn(hidden_state, attention_mask=attention_mask)
if self.is_gated:
hidden_state = self.gate_attn.tanh() * hidden_state
hidden_state = residual + hidden_state
residual = hidden_state
hidden_state = self.post_attention_layernorm(hidden_state)
hidden_state = self.mlp(hidden_state)
if self.is_gated:
hidden_state = self.gate_ffn.tanh() * hidden_state
hidden_state = residual + hidden_state
return hidden_state
|
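The gated variant above scales each branch by the tanh of a learned scalar initialized to pi/4, so a freshly initialized gated layer passes roughly 0.66 of the attention and MLP outputs rather than starting fully open or fully closed. A quick numeric illustration of that initial gate value:
```python
import math
import torch

gate = torch.ones(1) * math.pi / 4   # initial value used for gate_attn / gate_ffn
print(gate.tanh())                    # ~0.6558

residual = torch.randn(3)
branch_output = torch.randn(3)
# Gated residual update, as in the forward pass of the gated layer:
hidden_state = residual + gate.tanh() * branch_output
print(hidden_state)
```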
class MllamaVisionEncoderLayer(nn.Module):
def __init__(self, config: MllamaVisionConfig, is_gated: bool=False):
pass
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None):
pass
| 3 | 0 | 23 | 4 | 18 | 1 | 3 | 0.06 | 1 | 5 | 2 | 0 | 2 | 10 | 2 | 12 | 47 | 9 | 36 | 21 | 28 | 2 | 31 | 16 | 28 | 4 | 1 | 1 | 6 |
3,849 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaVisionMLP |
from torch import nn
from ...activations import ACT2FN
import torch
import torch.nn.functional as F
class MllamaVisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class MllamaVisionMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 13 | 1 | 12 | 7 | 9 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2 |
3,850 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/modeling_mllama.py | transformers.models.mllama.modeling_mllama.MllamaVisionModel |
from torch import nn
import torch.nn.functional as F
from .configuration_mllama import MllamaConfig, MllamaTextConfig, MllamaVisionConfig
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils.generic import OutputRecorder, check_model_inputs
@auto_docstring(custom_intro='\n The Mllama Vision Model which consists of two vision encoders.\n ')
class MllamaVisionModel(MllamaPreTrainedModel):
config: MllamaVisionConfig
base_model_prefix = 'vision_model'
def __init__(self, config: MllamaVisionConfig):
super().__init__(config)
self.image_size = config.image_size
self.patch_size = config.patch_size
self.max_num_tiles = config.max_num_tiles
self.hidden_size = config.hidden_size
self.num_channels = config.num_channels
self.intermediate_layers_indices = config.intermediate_layers_indices
self.num_patches = (self.image_size // self.patch_size) ** 2 + 1
self.scale = config.hidden_size ** (-0.5)
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.hidden_size, kernel_size=self.patch_size, stride=self.patch_size, padding='valid', bias=False)
self.class_embedding = nn.Parameter(self.scale * torch.randn(self.hidden_size))
self.gated_positional_embedding = MllamaPrecomputedPositionEmbedding(config)
self.pre_tile_positional_embedding = MllamaPrecomputedAspectRatioEmbedding(config, is_gated=True)
self.post_tile_positional_embedding = MllamaPrecomputedAspectRatioEmbedding(config, is_gated=True)
self.layernorm_pre = nn.LayerNorm(self.hidden_size)
self.layernorm_post = nn.LayerNorm(self.hidden_size)
self.transformer = MllamaVisionEncoder(config, config.num_hidden_layers, is_gated=False)
self.global_transformer = MllamaVisionEncoder(config, config.num_global_layers, is_gated=True)
self.post_init()
def get_input_embeddings(self):
"""
This function is used to fetch the first embedding layer to activate grads on inputs.
"""
return self.patch_embedding
def apply_class_embedding(self, hidden_state: torch.Tensor) -> torch.Tensor:
batch_size, _, hidden_size = hidden_state.shape
class_embedding = self.class_embedding.expand(batch_size, 1, hidden_size)
hidden_state = torch.cat([class_embedding, hidden_state], dim=1)
return hidden_state
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.Tensor, aspect_ratio_ids: torch.Tensor, aspect_ratio_mask: torch.Tensor, **kwargs) -> BaseModelOutput:
"""
aspect_ratio_ids (`torch.Tensor` of shape `(batch_size, max_num_images)`, *optional*):
Aspect ratio ids used to select the appropriate precomputed tile embeddings based on the aspect ratio of each input image.
These ids correspond to indices in the model's list of supported aspect ratios, offset by 1.
For example, if the model supports aspect ratios [[1, 1], [1, 2], [2, 1]]:
- An image with aspect ratio [1, 1] would have ID 1
- An image with aspect ratio [1, 2] would have ID 2
- An image with aspect ratio [2, 1] would have ID 3
The id 0 is reserved for padding (i.e., no image).
If an image has aspect ratio [1, 2], that means it was split into 2 tiles horizontally, and its `aspect_ratio_id` would be 2.
aspect_ratio_mask (`torch.Tensor` of shape `(batch_size, max_num_images, max_num_tiles)`, *optional*):
Mask to avoid performing attention on padding tiles. Mask values selected in `[0, 1]`:
- 1 for tiles that are **not masked**,
- 0 for tiles that are **masked**.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, MllamaVisionModel
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaVisionModel.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> output = model(**inputs)
>>> print(output.last_hidden_state.shape)
torch.Size([1, 1, 4, 1025, 7680])
```
"""
batch_size, num_concurrent_media, num_tiles, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * num_concurrent_media * num_tiles, num_channels, height, width)
aspect_ratio_ids = aspect_ratio_ids.reshape(batch_size * num_concurrent_media, -1)
target_dtype = self.patch_embedding.weight.dtype
target_device = self.patch_embedding.weight.device
patch_embeds = self.patch_embedding(pixel_values.to(target_device, target_dtype))
hidden_state = patch_embeds.flatten(2).transpose(1, 2)
_, num_patches, dim = hidden_state.shape
hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles, -1, dim)
hidden_state = self.pre_tile_positional_embedding(hidden_state, aspect_ratio_ids)
hidden_state = hidden_state.reshape(batch_size * num_concurrent_media * num_tiles, num_patches, dim)
hidden_state = self.apply_class_embedding(hidden_state)
num_patches += 1
hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles, num_patches, dim)
hidden_state = self.gated_positional_embedding(hidden_state, aspect_ratio_ids)
hidden_state = self.layernorm_pre(hidden_state)
num_padding_patches = (8 - hidden_state.shape[-2] % 8) % 8
padding = (0, 0, 0, num_padding_patches)
hidden_state = F.pad(hidden_state, padding, mode='constant', value=0)
slice_index = -num_padding_patches if num_padding_patches > 0 else None
attention_mask = aspect_ratio_mask.reshape(batch_size * num_concurrent_media, -1)
attention_mask = _prepare_aspect_ratio_attention_mask(aspect_ratio_mask=attention_mask, num_patches=self.num_patches, target_length=hidden_state.shape[2], dtype=self.dtype)
hidden_state = hidden_state.view(batch_size * num_concurrent_media, -1, dim)
output = self.transformer(hidden_state, attention_mask=attention_mask)
hidden_state = output.last_hidden_state
hidden_state = self.layernorm_post(hidden_state)
hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles, num_patches + num_padding_patches, dim)
hidden_state = self.post_tile_positional_embedding(hidden_state, aspect_ratio_ids)
hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles * (num_patches + num_padding_patches), dim)
global_output = self.global_transformer(hidden_state, attention_mask=attention_mask)
hidden_state = global_output.last_hidden_state
hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles, num_patches + num_padding_patches, dim)
hidden_state = hidden_state[:, :, :slice_index]
hidden_state = hidden_state.reshape(batch_size, num_concurrent_media, num_tiles, num_patches, dim)
all_intermediate_hidden_states = [output.hidden_states[i] for i in self.intermediate_layers_indices]
intermediate_hidden_states = torch.stack(all_intermediate_hidden_states, dim=-1)
intermediate_hidden_states = intermediate_hidden_states.reshape(batch_size * num_concurrent_media, num_tiles, num_patches + num_padding_patches, -1)
intermediate_hidden_states = intermediate_hidden_states[:, :, :slice_index]
intermediate_hidden_states = intermediate_hidden_states.reshape(batch_size, num_concurrent_media, num_tiles, num_patches, -1)
hidden_state = torch.cat([hidden_state, intermediate_hidden_states], dim=-1)
return BaseModelOutput(last_hidden_state=hidden_state)
|
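A small sketch of the `aspect_ratio_ids` convention described in the docstring above: ids are 1-based indices into the model's supported aspect-ratio list, with 0 reserved for padding. The supported ratios here are the docstring's example list, not a real checkpoint's configuration:
```python
# Map an image's tile layout to its aspect_ratio_id, following the docstring's convention.
supported_aspect_ratios = [[1, 1], [1, 2], [2, 1]]  # example list from the docstring

def aspect_ratio_to_id(aspect_ratio, supported):
    # id 0 is reserved for padding (no image); real ratios are 1-indexed
    return supported.index(list(aspect_ratio)) + 1

print(aspect_ratio_to_id((1, 1), supported_aspect_ratios))  # 1
print(aspect_ratio_to_id((1, 2), supported_aspect_ratios))  # 2 -> split into 2 tiles side by side
print(aspect_ratio_to_id((2, 1), supported_aspect_ratios))  # 3
```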
@auto_docstring(custom_intro='\n The Mllama Vision Model which consists of two vision encoders.\n ')
class MllamaVisionModel(MllamaPreTrainedModel):
def __init__(self, config: MllamaVisionConfig):
pass
def get_input_embeddings(self):
'''
This function is used to fetch the first embedding layer to activate grads on inputs.
'''
pass
def apply_class_embedding(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.Tensor, aspect_ratio_ids: torch.Tensor, aspect_ratio_mask: torch.Tensor, **kwargs) -> BaseModelOutput:
'''
aspect_ratio_ids (`torch.Tensor` of shape `(batch_size, max_num_images)`, *optional*):
Aspect ratio ids used to select the appropriate precomputed tile embeddings based on the aspect ratio of each input image.
These ids correspond to indices in the model's list of supported aspect ratios, offset by 1.
For example, if the model supports aspect ratios [[1, 1], [1, 2], [2, 1]]:
- An image with aspect ratio [1, 1] would have ID 1
- An image with aspect ratio [1, 2] would have ID 2
- An image with aspect ratio [2, 1] would have ID 3
The id 0 is reserved for padding (i.e., no image).
If an image has aspect ratio [1, 2], that means it was split into 2 tiles horizontally, and its `aspect_ratio_id` would be 2.
aspect_ratio_mask (`torch.Tensor` of shape `(batch_size, max_num_images, max_num_tiles)`, *optional*):
Mask to avoid performing attention on padding tiles. Mask values selected in `[0, 1]`:
- 1 for tiles that are **not masked**,
- 0 for tiles that are **masked**.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, MllamaVisionModel
>>> checkpoint = "meta-llama/Llama-3.2-11B-Vision"
>>> model = MllamaVisionModel.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> output = model(**inputs)
>>> print(output.last_hidden_state.shape)
torch.Size([1, 1, 4, 1025, 7680])
```
'''
pass
| 8 | 2 | 50 | 9 | 32 | 10 | 3 | 0.3 | 1 | 9 | 5 | 0 | 4 | 17 | 4 | 7 | 208 | 38 | 132 | 50 | 117 | 39 | 82 | 41 | 77 | 9 | 2 | 1 | 12 |
3,851 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/processing_mllama.py | transformers.models.mllama.processing_mllama.MllamaImagesKwargs |
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from typing import Optional, Union
class MllamaImagesKwargs(ImagesKwargs, total=False):
max_image_tiles: Optional[int]
|
class MllamaImagesKwargs(ImagesKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
3,852 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/processing_mllama.py | transformers.models.mllama.processing_mllama.MllamaProcessor |
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...feature_extraction_utils import BatchFeature
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...image_utils import ImageInput, make_nested_list_of_images
from typing import Optional, Union
class MllamaProcessor(ProcessorMixin):
"""
Constructs a Mllama processor which wraps [`MllamaImageProcessor`] and
[`PretrainedTokenizerFast`] into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~MllamaProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more
information.
The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
```python
from transformers import MllamaProcessor
from PIL import Image
processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")
processor(
images=your_pil_image,
text=["<|image|>If I had to write a haiku for this one"],
images_kwargs = {"size": {"height": 448, "width": 448}},
text_kwargs = {"padding": "right"},
common_kwargs = {"return_tensors": "pt"},
)
```
Args:
image_processor ([`MllamaImageProcessor`]):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'MllamaImageProcessor'
tokenizer_class = 'PreTrainedTokenizerFast'
def __init__(self, image_processor, tokenizer, chat_template=None):
if not hasattr(tokenizer, 'image_token'):
self.image_token = '<|image|>'
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
else:
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
self.python_token = '<|python_tag|>'
self.python_token_id = tokenizer.convert_tokens_to_ids(self.python_token)
self.bos_token = tokenizer.bos_token
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[MllamaProcessorKwargs]) -> BatchFeature:
"""
Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text`
arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` arguments to
MllamaImageProcessor's [`~MllamaImageProcessor.__call__`] if `images` is not `None`. Please refer
to the docstring of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
TODO: add aspect_ratio_ids and aspect_ratio_mask and cross_attention_mask
"""
if text is None and images is None:
raise ValueError('You must specify either text or images.')
output_kwargs = self._merge_kwargs(MllamaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
text_kwargs = output_kwargs['text_kwargs']
text_kwargs['return_tensors'] = None
images_kwargs = output_kwargs['images_kwargs']
common_kwargs = output_kwargs['common_kwargs']
data = {}
if text is not None:
if isinstance(text, str):
text = [text]
elif not (isinstance(text, (list, tuple)) and all((isinstance(t, str) for t in text))):
raise ValueError('Invalid input text. Please provide a string, or a list of strings')
n_images_in_text = [t.count(self.image_token) for t in text]
text = [build_string_from_input(text_item, self.bos_token, self.image_token) for text_item in text]
_ = text_kwargs.pop('padding_side', None)
encoding = self.tokenizer(text, **text_kwargs)
self._check_special_mm_tokens(text, encoding, modalities=['image'])
n_images_in_ids = [token_ids.count(self.image_token_id) for token_ids in encoding['input_ids']]
data.update(encoding)
n_images_in_images = [0]
if images is not None:
images = self.image_processor.fetch_images(images)
images = make_nested_list_of_images(images)
n_images_in_images = [len(sample) for sample in images]
if text is not None:
if any((batch_img == 0 for batch_img in n_images_in_text)) and (not all((batch_img == 0 for batch_img in n_images_in_text))):
raise ValueError('If a batch of text is provided, there should be either no images or at least one image per sample')
if sum(n_images_in_text) > 0 and (n_images_in_images != n_images_in_text or n_images_in_ids != n_images_in_images):
if images is None:
raise ValueError('No image were provided, but there are image tokens in the prompt')
else:
add_message = ''
if sum(n_images_in_images) == sum(n_images_in_text) and n_images_in_images != n_images_in_text:
add_message = 'Make sure to pass your images as a nested list, where each sub-list holds images per batch'
elif n_images_in_ids != n_images_in_images:
add_message = "If you activated truncation with `max_length`, increase the `max_length` so image tokens aren't cropped."
raise ValueError(f'The number of image tokens in each text ({n_images_in_text}) should be the same as the number of provided images per batch ({n_images_in_images}). {add_message}')
if images is not None:
image_features = self.image_processor(images, **images_kwargs)
num_tiles = image_features.pop('num_tiles')
data.update(image_features)
if images is not None and text is not None:
cross_attention_token_mask = [get_cross_attention_token_mask(token_ids, self.image_token_id) for token_ids in encoding['input_ids']]
cross_attention_mask = convert_sparse_cross_attention_mask_to_dense(cross_attention_token_mask, num_tiles=num_tiles, max_num_tiles=self.image_processor.max_image_tiles, length=max((len(input_ids) for input_ids in encoding['input_ids'])))
data['cross_attention_mask'] = cross_attention_mask
return_tensors = common_kwargs.pop('return_tensors', None)
batch_feature = BatchFeature(data=data, tensor_type=return_tensors)
return batch_feature
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
image_processor_input_names = [name for name in image_processor_input_names if name != 'num_tiles']
return list(tokenizer_input_names + image_processor_input_names + ['cross_attention_mask'])
|
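A hedged sketch of the per-sample consistency check in `__call__` above: the processor counts `<|image|>` placeholders in each prompt and compares them with the number of images supplied for that batch entry, raising when they disagree. The variables below are local stand-ins, not processor attributes:
```python
# Count image placeholders per prompt and compare against the images supplied per sample,
# mirroring the validation logic in MllamaProcessor.__call__.
image_token = "<|image|>"
texts = [
    "<|image|>If I had to write a haiku for this one",
    "<|image|><|image|>Compare these two pictures",
]
images_per_sample = [1, 2]  # hypothetical nested-list lengths, e.g. [[img], [img, img]]

n_images_in_text = [t.count(image_token) for t in texts]
if n_images_in_text != images_per_sample:
    raise ValueError(
        f"The number of image tokens in each text ({n_images_in_text}) should match "
        f"the number of provided images per batch ({images_per_sample})."
    )
print(n_images_in_text)  # [1, 2]
```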
class MllamaProcessor(ProcessorMixin):
'''
Constructs a Mllama processor which wraps [`MllamaImageProcessor`] and
[`PretrainedTokenizerFast`] into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~MllamaProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more
information.
The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
```python
from transformers import MllamaProcessor
from PIL import Image
processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")
processor(
images=your_pil_image,
text=["<|image|>If I had to write a haiku for this one"],
images_kwargs = {"size": {"height": 448, "width": 448}},
text_kwargs = {"padding": "right"},
common_kwargs = {"return_tensors": "pt"},
)
```
Args:
image_processor ([`MllamaImageProcessor`]):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
'''
def __init__(self, image_processor, tokenizer, chat_template=None):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[MllamaProcessorKwargs]) -> BatchFeature:
'''
Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text`
arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` arguments to
MllamaImageProcessor's [`~MllamaImageProcessor.__call__`] if `images` is not `None`. Please refer
to the docstring of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
TODO: add aspect_ratio_ids and aspect_ratio_mask and cross_attention_mask
'''
pass
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs):
'''
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
'''
pass
@property
def model_input_names(self):
pass
| 6 | 3 | 25 | 2 | 15 | 8 | 3 | 0.77 | 1 | 7 | 2 | 0 | 6 | 6 | 6 | 23 | 190 | 25 | 94 | 41 | 79 | 72 | 64 | 33 | 57 | 12 | 2 | 3 | 18 |
3,853 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mllama/processing_mllama.py | transformers.models.mllama.processing_mllama.MllamaProcessorKwargs |
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
class MllamaProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: MllamaImagesKwargs
_defaults = {'images_kwargs': {'max_image_tiles': 4}}
|
class MllamaProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 1 | 7 | 2 | 6 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
3,854 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mluke/tokenization_mluke.py | transformers.models.mluke.tokenization_mluke.MLukeTokenizer |
import json
import sentencepiece as spm
import itertools
from typing import Any, Optional, Union
import numpy as np
from shutil import copyfile
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, PaddingStrategy, TensorType, TextInput, TextInputPair, TruncationStrategy, to_py_obj
from ...utils.import_utils import requires
import os
from ...utils import add_end_docstrings, is_torch_tensor, logging
from collections.abc import Mapping
@requires(backends=('sentencepiece',))
class MLukeTokenizer(PreTrainedTokenizer):
"""
Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
entity_vocab_file (`str`):
Path to the entity vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
task (`str`, *optional*):
Task for which you want to prepare sequences. One of `"entity_classification"`,
`"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity
sequence is automatically created based on the given entity span(s).
max_entity_length (`int`, *optional*, defaults to 32):
The maximum length of `entity_ids`.
max_mention_length (`int`, *optional*, defaults to 30):
The maximum number of tokens inside an entity span.
entity_token_1 (`str`, *optional*, defaults to `<ent>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
`task` is set to `"entity_classification"` or `"entity_pair_classification"`.
entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
`task` is set to `"entity_pair_classification"`.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, entity_vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', task=None, max_entity_length=32, max_mention_length=30, entity_token_1='<ent>', entity_token_2='<ent2>', entity_unk_token='[UNK]', entity_pad_token='[PAD]', entity_mask_token='[MASK]', entity_mask2_token='[MASK2]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
entity_token_1 = AddedToken(entity_token_1, lstrip=False, rstrip=False) if isinstance(entity_token_1, str) else entity_token_1
entity_token_2 = AddedToken(entity_token_2, lstrip=False, rstrip=False) if isinstance(entity_token_2, str) else entity_token_2
additional_special_tokens = kwargs.pop('additional_special_tokens', [])
additional_special_tokens += [entity_token_1, entity_token_2]
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
self.fairseq_offset = 1
self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
with open(entity_vocab_file, encoding='utf-8') as entity_vocab_handle:
self.entity_vocab = json.load(entity_vocab_handle)
for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]:
if entity_special_token not in self.entity_vocab:
raise ValueError(f'Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. Probably an incorrect entity vocab file is loaded: {entity_vocab_file}.')
self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]
self.task = task
if task is None or task == 'entity_span_classification':
self.max_entity_length = max_entity_length
elif task == 'entity_classification':
self.max_entity_length = 1
elif task == 'entity_pair_classification':
self.max_entity_length = 2
else:
raise ValueError(f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification', 'entity_span_classification'] only.")
self.max_mention_length = max_mention_length
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, task=task, max_entity_length=max_entity_length, max_mention_length=max_mention_length, entity_token_1=entity_token_1, entity_token_2=entity_token_2, entity_unk_token=entity_unk_token, entity_pad_token=entity_pad_token, entity_mask_token=entity_mask_token, entity_mask2_token=entity_mask2_token, additional_special_tokens=additional_special_tokens, **kwargs)
@property
def vocab_size(self):
return len(self.sp_model) + self.fairseq_offset + 1
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> list[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
return out_string
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
state['sp_model_proto'] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, list[TextInput]], text_pair: Optional[Union[TextInput, list[TextInput]]]=None, entity_spans: Optional[Union[EntitySpanInput, list[EntitySpanInput]]]=None, entity_spans_pair: Optional[Union[EntitySpanInput, list[EntitySpanInput]]]=None, entities: Optional[Union[EntityInput, list[EntityInput]]]=None, entities_pair: Optional[Union[EntityInput, list[EntityInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences, depending on the task you want to prepare them for.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
text_pair (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
entity_spans (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify
`"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor,
the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each
sequence must be equal to the length of each sequence of `entities`.
entity_spans_pair (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify the
`task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
length of each sequence must be equal to the length of each sequence of `entities_pair`.
entities (`list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
`entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
is automatically constructed by filling it with the [MASK] entity.
entities_pair (`list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
`entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
sequences is automatically constructed by filling it with the [MASK] entity.
max_entity_length (`int`, *optional*):
The maximum length of `entity_ids`.
"""
is_valid_single_text = isinstance(text, str)
is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or isinstance(text[0], str))
if not (is_valid_single_text or is_valid_batch_text):
raise ValueError('text input must be of type `str` (single example) or `list[str]` (batch).')
is_valid_single_text_pair = isinstance(text_pair, str)
is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (len(text_pair) == 0 or isinstance(text_pair[0], str))
if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):
raise ValueError('text_pair input must be of type `str` (single example) or `list[str]` (batch).')
is_batched = bool(isinstance(text, (list, tuple)))
if is_batched:
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
if entities is None:
batch_entities_or_entities_pairs = None
else:
batch_entities_or_entities_pairs = list(zip(entities, entities_pair)) if entities_pair is not None else entities
if entity_spans is None:
batch_entity_spans_or_entity_spans_pairs = None
else:
batch_entity_spans_or_entity_spans_pairs = list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs, batch_entities_or_entities_pairs=batch_entities_or_entities_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, entities=entities, entities_pair=entities_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
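# --- Added usage sketch (not part of the original source) ---
# A minimal single-example call, assuming the "studio-ousia/mluke-base" checkpoint and
# illustrative character spans; the exact ids depend on the loaded vocabularies.
#
#   from transformers import MLukeTokenizer
#   tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
#   text = "Beyoncé lives in Los Angeles."
#   entity_spans = [(0, 7), (17, 28)]   # character spans of "Beyoncé" and "Los Angeles"
#   encoding = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
#   # encoding now holds input_ids, attention_mask, entity_ids, entity_position_ids, ...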
def _encode_plus(self, text: Union[TextInput], text_pair: Optional[Union[TextInput]]=None, entity_spans: Optional[EntitySpanInput]=None, entity_spans_pair: Optional[EntitySpanInput]=None, entities: Optional[EntityInput]=None, entities_pair: Optional[EntityInput]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674')
if is_split_into_words:
raise NotImplementedError('is_split_into_words is not supported in this tokenizer.')
first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans = self._create_input_sequence(text=text, text_pair=text_pair, entities=entities, entities_pair=entities_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, **kwargs)
return self.prepare_for_model(first_ids, pair_ids=second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair]], batch_entity_spans_or_entity_spans_pairs: Optional[Union[list[EntitySpanInput], list[tuple[EntitySpanInput, EntitySpanInput]]]]=None, batch_entities_or_entities_pairs: Optional[Union[list[EntityInput], list[tuple[EntityInput, EntityInput]]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
if is_split_into_words:
raise NotImplementedError('is_split_into_words is not supported in this tokenizer.')
input_ids = []
entity_ids = []
entity_token_spans = []
for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
if not isinstance(text_or_text_pair, (list, tuple)):
text, text_pair = (text_or_text_pair, None)
else:
text, text_pair = text_or_text_pair
entities, entities_pair = (None, None)
if batch_entities_or_entities_pairs is not None:
entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
if entities_or_entities_pairs:
if isinstance(entities_or_entities_pairs[0], str):
entities, entities_pair = (entities_or_entities_pairs, None)
else:
entities, entities_pair = entities_or_entities_pairs
entity_spans, entity_spans_pair = (None, None)
if batch_entity_spans_or_entity_spans_pairs is not None:
entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance(entity_spans_or_entity_spans_pairs[0], list):
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
else:
entity_spans, entity_spans_pair = (entity_spans_or_entity_spans_pairs, None)
first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans = self._create_input_sequence(text=text, text_pair=text_pair, entities=entities, entities_pair=entities_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, **kwargs)
input_ids.append((first_ids, second_ids))
entity_ids.append((first_entity_ids, second_entity_ids))
entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
batch_outputs = self._batch_prepare_for_model(input_ids, batch_entity_ids_pairs=entity_ids, batch_entity_token_spans_pairs=entity_token_spans, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose)
return BatchEncoding(batch_outputs)
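# Added batch-shape sketch (illustrative values, not in the original source): for a batch
# of two texts the expected nesting is one list of spans / entities per text, e.g.
#   texts        = ["Tokyo is nice", "Osaka and Kyoto"]
#   entity_spans = [[(0, 5)], [(0, 5), (10, 15)]]
#   entities     = [["Tokyo"], ["Osaka", "Kyoto"]]   # optional, same nesting as entity_spans
# Pair inputs are passed as parallel lists via text_pair / entity_spans_pair / entities_pair.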
def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
if not isinstance(entity_spans, list):
raise TypeError('entity_spans should be given as a list')
elif len(entity_spans) > 0 and (not isinstance(entity_spans[0], tuple)):
raise ValueError('entity_spans should be given as a list of tuples containing the start and end character indices')
if entities is not None:
if not isinstance(entities, list):
raise ValueError('If you specify entities, they should be given as a list')
if len(entities) > 0 and (not isinstance(entities[0], str)):
raise ValueError('If you specify entities, they should be given as a list of entity names')
if len(entities) != len(entity_spans):
raise ValueError('If you specify entities, entities and entity_spans must be the same length')
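# Added format sketch (illustrative only): the shapes this check accepts for one example.
#   entity_spans = [(0, 7), (17, 28)]          # list of (start_char, end_char) tuples
#   entities     = ["Beyoncé", "Los Angeles"]  # optional; len(entities) must equal len(entity_spans)
# Anything else (e.g. a tuple instead of a list, or mismatched lengths) raises one of the
# errors above.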
def _create_input_sequence(self, text: Union[TextInput], text_pair: Optional[Union[TextInput]]=None, entities: Optional[EntityInput]=None, entities_pair: Optional[EntityInput]=None, entity_spans: Optional[EntitySpanInput]=None, entity_spans_pair: Optional[EntitySpanInput]=None, **kwargs) -> tuple[list, list, list, list, list, list]:
def get_input_ids(text):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
def get_input_ids_and_entity_token_spans(text, entity_spans):
if entity_spans is None:
return (get_input_ids(text), None)
cur = 0
input_ids = []
entity_token_spans = [None] * len(entity_spans)
split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
char_pos2token_pos = {}
for split_char_position in split_char_positions:
orig_split_char_position = split_char_position
if split_char_position > 0 and text[split_char_position - 1] == ' ':
split_char_position -= 1
if cur != split_char_position:
input_ids += get_input_ids(text[cur:split_char_position])
cur = split_char_position
char_pos2token_pos[orig_split_char_position] = len(input_ids)
input_ids += get_input_ids(text[cur:])
entity_token_spans = [(char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans]
return (input_ids, entity_token_spans)
first_ids, second_ids = (None, None)
first_entity_ids, second_entity_ids = (None, None)
first_entity_token_spans, second_entity_token_spans = (None, None)
if self.task is None:
if entity_spans is None:
first_ids = get_input_ids(text)
else:
self._check_entity_input_format(entities, entity_spans)
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
if entities is None:
first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
else:
first_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities]
if text_pair is not None:
if entity_spans_pair is None:
second_ids = get_input_ids(text_pair)
else:
self._check_entity_input_format(entities_pair, entity_spans_pair)
second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(text_pair, entity_spans_pair)
if entities_pair is None:
second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair)
else:
second_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair]
elif self.task == 'entity_classification':
if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)):
raise ValueError('Entity spans should be a list containing a single tuple containing the start and end character indices of an entity')
first_entity_ids = [self.entity_mask_token_id]
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
entity_token_start, entity_token_end = first_entity_token_spans[0]
first_ids = first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:]
first_ids = first_ids[:entity_token_start] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_start:]
first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]
elif self.task == 'entity_pair_classification':
if not (isinstance(entity_spans, list) and len(entity_spans) == 2 and isinstance(entity_spans[0], tuple) and isinstance(entity_spans[1], tuple)):
raise ValueError('Entity spans should be provided as a list of two tuples, each tuple containing the start and end character indices of an entity')
head_span, tail_span = entity_spans
first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
head_token_span, tail_token_span = first_entity_token_spans
token_span_with_special_token_ids = [(head_token_span, self.additional_special_tokens_ids[0]), (tail_token_span, self.additional_special_tokens_ids[1])]
if head_token_span[0] < tail_token_span[0]:
first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
else:
first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)
for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]
elif self.task == 'entity_span_classification':
if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)):
raise ValueError('Entity spans should be provided as a list of tuples, each tuple containing the start and end character indices of an entity')
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
else:
raise ValueError(f'Task {self.task} not supported')
return (first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans)
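# Added worked sketch of the char-to-token mapping above (illustrative tokenization, not
# the real SentencePiece output): for text = "Tokyo is in Japan" and entity_spans = [(0, 5)],
# the text is split at the span boundaries, so "Tokyo" and " is in Japan" are tokenized
# separately. If "Tokyo" becomes a single token, char_pos2token_pos ends up as {0: 0, 5: 1}
# and the returned entity_token_spans is [(0, 1)], i.e. token positions counted before any
# special tokens are added.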
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_ids_pairs: list[tuple[list[int], None]], batch_entity_ids_pairs: list[tuple[Optional[list[int]], Optional[list[int]]]], batch_entity_token_spans_pairs: list[tuple[Optional[list[tuple[int, int]]], Optional[list[tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
"""
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens, and
        manages a moving window (with a user-defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
batch_entity_ids_pairs: list of entity ids or entity ids pairs
batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
max_entity_length: The maximum length of the entity sequence.
"""
batch_outputs = {}
for input_ids, entity_ids, entity_token_span_pairs in zip(batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs):
first_ids, second_ids = input_ids
first_entity_ids, second_entity_ids = entity_ids
first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
outputs = self.prepare_for_model(first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, ids: list[int], pair_ids: Optional[list[int]]=None, entity_ids: Optional[list[int]]=None, pair_entity_ids: Optional[list[int]]=None, entity_token_spans: Optional[list[tuple[int, int]]]=None, pair_entity_token_spans: Optional[list[tuple[int, int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
"""
        Prepares a sequence of input ids, entity ids and entity spans, or a pair of sequences of input ids, entity ids
        and entity spans, so that it can be used by the model. It adds special tokens, truncates sequences if
        overflowing while taking into account the special tokens, and manages a moving window (with a user-defined
        stride) for overflowing tokens. Please note that for *pair_ids* different from `None` and
        *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens; such a
        combination of arguments will raise an error.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence.
entity_ids (`list[int]`, *optional*):
Entity ids of the first sequence.
pair_entity_ids (`list[int]`, *optional*):
Entity ids of the second sequence.
entity_token_spans (`list[tuple[int, int]]`, *optional*):
Entity spans of the first sequence.
pair_entity_token_spans (`list[tuple[int, int]]`, *optional*):
Entity spans of the second sequence.
max_entity_length (`int`, *optional*):
The maximum length of the entity sequence.
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
if return_token_type_ids and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (pair_ids is not None):
raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.')
if return_token_type_ids is None:
return_token_type_ids = 'token_type_ids' in self.model_input_names
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
encoded_inputs = {}
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
overflowing_tokens = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):
ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride)
if return_overflowing_tokens:
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
entity_token_offset = 1
pair_entity_token_offset = len(ids) + 3
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
entity_token_offset = 0
pair_entity_token_offset = len(ids)
encoded_inputs['input_ids'] = sequence
if return_token_type_ids:
encoded_inputs['token_type_ids'] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs['special_tokens_mask'] = [0] * len(sequence)
if not max_entity_length:
max_entity_length = self.max_entity_length
if entity_ids is not None:
total_entity_len = 0
num_invalid_entities = 0
valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)]
valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]
total_entity_len += len(valid_entity_ids)
num_invalid_entities += len(entity_ids) - len(valid_entity_ids)
valid_pair_entity_ids, valid_pair_entity_token_spans = (None, None)
if pair_entity_ids is not None:
valid_pair_entity_ids = [ent_id for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans) if span[1] <= len(pair_ids)]
valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)]
total_entity_len += len(valid_pair_entity_ids)
num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)
if num_invalid_entities != 0:
logger.warning(f'{num_invalid_entities} entities are ignored because their entity spans are invalid due to the truncation of input tokens')
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length:
valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences(valid_entity_ids, pair_ids=valid_pair_entity_ids, num_tokens_to_remove=total_entity_len - max_entity_length, truncation_strategy=truncation_strategy, stride=stride)
valid_entity_token_spans = valid_entity_token_spans[:len(valid_entity_ids)]
if valid_pair_entity_token_spans is not None:
valid_pair_entity_token_spans = valid_pair_entity_token_spans[:len(valid_pair_entity_ids)]
if return_overflowing_tokens:
encoded_inputs['overflowing_entities'] = overflowing_entities
encoded_inputs['num_truncated_entities'] = total_entity_len - max_entity_length
final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids
encoded_inputs['entity_ids'] = list(final_entity_ids)
entity_position_ids = []
entity_start_positions = []
entity_end_positions = []
for token_spans, offset in ((valid_entity_token_spans, entity_token_offset), (valid_pair_entity_token_spans, pair_entity_token_offset)):
if token_spans is not None:
for start, end in token_spans:
start += offset
end += offset
position_ids = list(range(start, end))[:self.max_mention_length]
position_ids += [-1] * (self.max_mention_length - end + start)
entity_position_ids.append(position_ids)
entity_start_positions.append(start)
entity_end_positions.append(end - 1)
encoded_inputs['entity_position_ids'] = entity_position_ids
if self.task == 'entity_span_classification':
encoded_inputs['entity_start_positions'] = entity_start_positions
encoded_inputs['entity_end_positions'] = entity_end_positions
if return_token_type_ids:
encoded_inputs['entity_token_type_ids'] = [0] * len(encoded_inputs['entity_ids'])
self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(encoded_inputs, max_length=max_length, max_entity_length=max_entity_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_length:
encoded_inputs['length'] = len(encoded_inputs['input_ids'])
batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
return batch_outputs
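# Added output sketch (illustrative, not from the original source): for a single example
# with one entity span (covering two word tokens) and add_special_tokens=True, the returned
# BatchEncoding typically looks along these lines:
#   {
#       "input_ids":           [cls_id, t1, t2, ..., sep_id],
#       "attention_mask":      [1, 1, 1, ..., 1],
#       "entity_ids":          [entity_mask_token_id],
#       "entity_position_ids": [[1, 2, -1, ..., -1]],  # token positions, padded with -1 up
#                                                      # to max_mention_length
#   }
# entity_start_positions / entity_end_positions are only added for the
# "entity_span_classification" task.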
def pad(self, encoded_inputs: Union[BatchEncoding, list[BatchEncoding], dict[str, EncodedInput], dict[str, list[EncodedInput]], list[dict[str, EncodedInput]]], padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True) -> BatchEncoding:
"""
        Pad a single encoded input or a batch of encoded inputs up to a predefined length or to the max sequence length
        in the batch. The padding side (left/right) and the padding token ids are defined at the tokenizer level (with
        `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`). Note: if the `encoded_inputs` passed
        are dictionaries of numpy arrays or PyTorch tensors, the result will use the same type unless you provide a
        different tensor type with `return_tensors`. In the case of PyTorch tensors, however, you will lose the
        specific device of your tensors.
Args:
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str, list[list[int]]` or `list[dict[str, list[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of
tokenized inputs (list of [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str,
list[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function. Instead of `list[int]` you can have tensors (numpy arrays, or PyTorch tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
max_entity_length (`int`, *optional*):
The maximum length of the entity sequence.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention
masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
"""
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0]}
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(f'You should supply an encoding or a list of encodings to this method that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}')
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs['attention_mask'] = []
return encoded_inputs
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
if not isinstance(first_element, (int, list, tuple)):
if is_torch_tensor(first_element):
return_tensors = 'pt' if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = 'np' if return_tensors is None else return_tensors
else:
raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, or pytorch object.')
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(padding=padding, max_length=max_length, verbose=verbose)
if max_entity_length is None:
max_entity_length = self.max_entity_length
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and (not isinstance(required_input[0], (list, tuple))):
encoded_inputs = self._pad(encoded_inputs, max_length=max_length, max_entity_length=max_entity_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
if any((len(v) != batch_size for v in encoded_inputs.values())):
raise ValueError('Some items in the output dictionary have a different batch size than others.')
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max((len(inputs) for inputs in required_input))
max_entity_length = max((len(inputs) for inputs in encoded_inputs['entity_ids'])) if 'entity_ids' in encoded_inputs else 0
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = {k: v[i] for k, v in encoded_inputs.items()}
outputs = self._pad(inputs, max_length=max_length, max_entity_length=max_entity_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
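# Added padding sketch (illustrative, not in the original source): with padding="longest",
# word tokens and entity tokens are padded independently, e.g. for a batch of two examples
#   input_ids:  [[5, 6, 7], [5, 6]]  -> [[5, 6, 7], [5, 6, pad_token_id]]
#   entity_ids: [[2, 3], [2]]        -> [[2, 3], [2, entity_pad_token_id]]
# and the matching attention_mask / entity_attention_mask entries are extended with 0s
# (entity_position_ids gets extra rows of -1 of length max_mention_length).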
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, max_entity_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
max_entity_length: The maximum length of the entity sequence.
padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
entities_provided = bool('entity_ids' in encoded_inputs)
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs['input_ids'])
if entities_provided:
max_entity_length = len(encoded_inputs['entity_ids'])
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
if entities_provided and max_entity_length is not None and (pad_to_multiple_of is not None) and (max_entity_length % pad_to_multiple_of != 0):
max_entity_length = (max_entity_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (len(encoded_inputs['input_ids']) != max_length or (entities_provided and len(encoded_inputs['entity_ids']) != max_entity_length))
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(encoded_inputs['input_ids'])
if entities_provided and return_attention_mask and ('entity_attention_mask' not in encoded_inputs):
encoded_inputs['entity_attention_mask'] = [1] * len(encoded_inputs['entity_ids'])
if needs_to_be_padded:
difference = max_length - len(encoded_inputs['input_ids'])
padding_side = padding_side if padding_side is not None else self.padding_side
if entities_provided:
entity_difference = max_entity_length - len(encoded_inputs['entity_ids'])
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if entities_provided:
encoded_inputs['entity_attention_mask'] = encoded_inputs['entity_attention_mask'] + [0] * entity_difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [0] * difference
if entities_provided:
encoded_inputs['entity_token_type_ids'] = encoded_inputs['entity_token_type_ids'] + [0] * entity_difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs['input_ids'] = encoded_inputs['input_ids'] + [self.pad_token_id] * difference
if entities_provided:
encoded_inputs['entity_ids'] = encoded_inputs['entity_ids'] + [self.entity_pad_token_id] * entity_difference
encoded_inputs['entity_position_ids'] = encoded_inputs['entity_position_ids'] + [[-1] * self.max_mention_length] * entity_difference
if self.task == 'entity_span_classification':
encoded_inputs['entity_start_positions'] = encoded_inputs['entity_start_positions'] + [0] * entity_difference
encoded_inputs['entity_end_positions'] = encoded_inputs['entity_end_positions'] + [0] * entity_difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if entities_provided:
encoded_inputs['entity_attention_mask'] = [0] * entity_difference + encoded_inputs['entity_attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [0] * difference + encoded_inputs['token_type_ids']
if entities_provided:
encoded_inputs['entity_token_type_ids'] = [0] * entity_difference + encoded_inputs['entity_token_type_ids']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs['input_ids'] = [self.pad_token_id] * difference + encoded_inputs['input_ids']
if entities_provided:
encoded_inputs['entity_ids'] = [self.entity_pad_token_id] * entity_difference + encoded_inputs['entity_ids']
encoded_inputs['entity_position_ids'] = [[-1] * self.max_mention_length] * entity_difference + encoded_inputs['entity_position_ids']
if self.task == 'entity_span_classification':
encoded_inputs['entity_start_positions'] = [0] * entity_difference + encoded_inputs['entity_start_positions']
encoded_inputs['entity_end_positions'] = [0] * entity_difference + encoded_inputs['entity_end_positions']
else:
                raise ValueError('Invalid padding side: ' + str(padding_side))
return encoded_inputs
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str, str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
entity_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['entity_vocab_file'])
with open(entity_vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
return (out_vocab_file, entity_vocab_file)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
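# Added worked sketch (illustrative ids, assuming cls_token_id=0 and sep_token_id=2):
# with token_ids_0=[10, 11] and token_ids_1=[20], this method returns
#   single sequence: [0, 10, 11, 2]
#   pair:            [0, 10, 11, 2, 2, 20, 2]
# matching the `<s> X </s>` / `<s> A </s></s> B </s>` layout described above.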
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
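# Added example sketch: for token_ids_0 of length 2 and token_ids_1 of length 1, the mask
# is [1, 0, 0, 1] for a single sequence and [1, 0, 0, 1, 1, 0, 1] for a pair, i.e. 1 marks
# the special tokens inserted by build_inputs_with_special_tokens.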
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
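# Added example sketch: for token_ids_0 of length 2 the result is [0, 0, 0, 0]
# (cls + 2 tokens + sep); adding token_ids_1 of length 1 yields seven zeros
# (cls + 2 + sep + sep + 1 + sep), since token type ids are not used by this model.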
|
@requires(backends=('sentencepiece',))
class MLukeTokenizer(PreTrainedTokenizer):
'''
Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
entity_vocab_file (`str`):
Path to the entity vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
task (`str`, *optional*):
Task for which you want to prepare sequences. One of `"entity_classification"`,
`"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity
sequence is automatically created based on the given entity span(s).
max_entity_length (`int`, *optional*, defaults to 32):
The maximum length of `entity_ids`.
max_mention_length (`int`, *optional*, defaults to 30):
The maximum number of tokens inside an entity span.
entity_token_1 (`str`, *optional*, defaults to `<ent>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
`task` is set to `"entity_classification"` or `"entity_pair_classification"`.
entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
`task` is set to `"entity_pair_classification"`.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
            - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
              using the forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
'''
def __init__(self, vocab_file, entity_vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', task=None, max_entity_length=32, max_mention_length=30, entity_token_1='<ent>', entity_token_2='<ent2>', entity_unk_token='[UNK]', entity_pad_token='[PAD]', entity_mask_token='[MASK]', entity_mask2_token='[MASK2]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text: str) -> list[str]:
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings for sub-words) in a single string.'''
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, list[TextInput]], text_pair: Optional[Union[TextInput, list[TextInput]]]=None, entity_spans: Optional[Union[EntitySpanInput, list[EntitySpanInput]]]=None, entity_spans_pair: Optional[Union[EntitySpanInput, list[EntitySpanInput]]]=None, entities: Optional[Union[EntityInput, list[EntityInput]]]=None, entities_pair: Optional[Union[EntityInput, list[EntityInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences, depending on the task you want to prepare them for.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
text_pair (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
entity_spans (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify
`"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor,
the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each
sequence must be equal to the length of each sequence of `entities`.
entity_spans_pair (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify the
`task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
length of each sequence must be equal to the length of each sequence of `entities_pair`.
entities (`list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
`entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
is automatically constructed by filling it with the [MASK] entity.
entities_pair (`list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
`entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
sequences is automatically constructed by filling it with the [MASK] entity.
max_entity_length (`int`, *optional*):
The maximum length of `entity_ids`.
'''
pass
def _encode_plus(self, text: Union[TextInput], text_pair: Optional[Union[TextInput]]=None, entity_spans: Optional[EntitySpanInput]=None, entity_spans_pair: Optional[EntitySpanInput]=None, entities: Optional[EntityInput]=None, entities_pair: Optional[EntityInput]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair]], batch_entity_spans_or_entity_spans_pairs: Optional[Union[list[EntitySpanInput], list[tuple[EntitySpanInput, EntitySpanInput]]]]=None, batch_entities_or_entities_pairs: Optional[Union[list[EntityInput], list[tuple[EntityInput, EntityInput]]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
pass
def _create_input_sequence(self, text: Union[TextInput], text_pair: Optional[Union[TextInput]]=None, entities: Optional[EntityInput]=None, entities_pair: Optional[EntityInput]=None, entity_spans: Optional[EntitySpanInput]=None, entity_spans_pair: Optional[EntitySpanInput]=None, **kwargs) -> tuple[list, list, list, list, list, list]:
pass
def get_input_ids(text):
pass
def get_input_ids_and_entity_token_spans(text, entity_spans):
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_ids_pairs: list[tuple[list[int], None]], batch_entity_ids_pairs: list[tuple[Optional[list[int]], Optional[list[int]]]], batch_entity_token_spans_pairs: list[tuple[Optional[list[tuple[int, int]]], Optional[list[tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
'''
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens, and
        manages a moving window (with a user-defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
batch_entity_ids_pairs: list of entity ids or entity ids pairs
batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
max_entity_length: The maximum length of the entity sequence.
'''
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, ids: list[int], pair_ids: Optional[list[int]]=None, entity_ids: Optional[list[int]]=None, pair_entity_ids: Optional[list[int]]=None, entity_token_spans: Optional[list[tuple[int, int]]]=None, pair_entity_token_spans: Optional[list[tuple[int, int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
'''
Prepares a sequence of input ids, entity ids and entity spans, or a pair of such sequences, so that it can be
used by the model. It adds special tokens, truncates sequences if they overflow while taking the special tokens
into account, and manages a moving window (with user-defined stride) for overflowing tokens. Please note that
when *pair_ids* is not `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
overflowing tokens; such a combination of arguments will raise an error.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence.
entity_ids (`list[int]`, *optional*):
Entity ids of the first sequence.
pair_entity_ids (`list[int]`, *optional*):
Entity ids of the second sequence.
entity_token_spans (`list[tuple[int, int]]`, *optional*):
Entity spans of the first sequence.
pair_entity_token_spans (`list[tuple[int, int]]`, *optional*):
Entity spans of the second sequence.
max_entity_length (`int`, *optional*):
The maximum length of the entity sequence.
'''
pass
def pad(self, encoded_inputs: Union[BatchEncoding, list[BatchEncoding], dict[str, EncodedInput], dict[str, list[EncodedInput]], list[dict[str, EncodedInput]]], padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True) -> BatchEncoding:
'''
Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
in the batch. Padding side (left/right) and padding token ids are defined at the tokenizer level (with
`self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`). Note: if the `encoded_inputs` passed
are dictionaries of numpy arrays or PyTorch tensors, the result will use the same type unless you provide a
different tensor type with `return_tensors`. In the case of PyTorch tensors, however, you will lose the
specific device of your tensors.
Args:
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str, list[list[int]]` or `list[dict[str, list[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of
tokenized inputs (list of [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str,
list[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function. Instead of `list[int]` you can have tensors (numpy arrays, or PyTorch tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
max_entity_length (`int`, *optional*):
The maximum length of the entity sequence.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention
masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
'''
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, max_entity_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
max_entity_length: The maximum length of the entity sequence.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str, str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
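The three helpers above fully determine the special-token layout described in their docstrings: a single sequence becomes `<s> X </s>`, a pair becomes `<s> A </s></s> B </s>`, the special-tokens mask marks exactly the added positions, and token type ids are all zeros. A minimal standalone sketch of that layout, using hypothetical ids 0 and 2 as stand-ins for `<s>` and `</s>` (not the tokenizer's real vocabulary):
```python
# Sketch of the documented special-token layout; 0/2 are hypothetical <s>/</s> ids.
BOS, EOS = 0, 2

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [BOS] + ids_a + [EOS]                    # <s> X </s>
    return [BOS] + ids_a + [EOS, EOS] + ids_b + [EOS]   # <s> A </s></s> B </s>

def special_tokens_mask(ids_a, ids_b=None):
    if ids_b is None:
        return [1] + [0] * len(ids_a) + [1]
    return [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]

def token_type_ids(ids_a, ids_b=None):
    # Token type ids are not used, so the mask is all zeros.
    return [0] * len(build_inputs(ids_a, ids_b))

print(build_inputs([10, 11], [20]))        # [0, 10, 11, 2, 2, 20, 2]
print(special_tokens_mask([10, 11], [20])) # [1, 0, 0, 1, 1, 0, 1]
print(token_type_ids([10, 11], [20]))      # [0, 0, 0, 0, 0, 0, 0]
```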
| 30 | 12 | 59 | 6 | 44 | 10 | 7 | 0.31 | 1 | 21 | 2 | 0 | 22 | 15 | 22 | 111 | 1,506 | 175 | 1,026 | 341 | 809 | 314 | 449 | 136 | 424 | 30 | 3 | 4 | 170 |
3,855 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/configuration_mobilebert.py | transformers.models.mobilebert.configuration_mobilebert.MobileBertConfig |
from ...configuration_utils import PretrainedConfig
class MobileBertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MobileBertModel`] or a [`TFMobileBertModel`]. It
is used to instantiate a MobileBERT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileBERT
[google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the MobileBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`MobileBertModel`] or [`TFMobileBertModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 512):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`MobileBertModel`] or
[`TFMobileBertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
The ID of the token in the word embedding to use as padding.
embedding_size (`int`, *optional*, defaults to 128):
The dimension of the word embedding vectors.
trigram_input (`bool`, *optional*, defaults to `True`):
Use a convolution of trigram as input.
use_bottleneck (`bool`, *optional*, defaults to `True`):
Whether to use bottleneck in BERT.
intra_bottleneck_size (`int`, *optional*, defaults to 128):
Size of bottleneck layer output.
use_bottleneck_attention (`bool`, *optional*, defaults to `False`):
Whether to use attention inputs from the bottleneck transformation.
key_query_shared_bottleneck (`bool`, *optional*, defaults to `True`):
Whether to use the same linear transformation for query&key in the bottleneck.
num_feedforward_networks (`int`, *optional*, defaults to 4):
Number of FFNs in a block.
normalization_type (`str`, *optional*, defaults to `"no_norm"`):
The normalization type in MobileBERT.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import MobileBertConfig, MobileBertModel
>>> # Initializing a MobileBERT configuration
>>> configuration = MobileBertConfig()
>>> # Initializing a model (with random weights) from the configuration above
>>> model = MobileBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'mobilebert'
def __init__(self, vocab_size=30522, hidden_size=512, num_hidden_layers=24, num_attention_heads=4, intermediate_size=512, hidden_act='relu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, embedding_size=128, trigram_input=True, use_bottleneck=True, intra_bottleneck_size=128, use_bottleneck_attention=False, key_query_shared_bottleneck=True, num_feedforward_networks=4, normalization_type='no_norm', classifier_activation=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_size = embedding_size
self.trigram_input = trigram_input
self.use_bottleneck = use_bottleneck
self.intra_bottleneck_size = intra_bottleneck_size
self.use_bottleneck_attention = use_bottleneck_attention
self.key_query_shared_bottleneck = key_query_shared_bottleneck
self.num_feedforward_networks = num_feedforward_networks
self.normalization_type = normalization_type
self.classifier_activation = classifier_activation
if self.use_bottleneck:
self.true_hidden_size = intra_bottleneck_size
else:
self.true_hidden_size = hidden_size
self.classifier_dropout = classifier_dropout
|
class MobileBertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MobileBertModel`] or a [`TFMobileBertModel`]. It
is used to instantiate a MobileBERT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileBERT
[google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the MobileBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`MobileBertModel`] or [`TFMobileBertModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 512):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`MobileBertModel`] or
[`TFMobileBertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
The ID of the token in the word embedding to use as padding.
embedding_size (`int`, *optional*, defaults to 128):
The dimension of the word embedding vectors.
trigram_input (`bool`, *optional*, defaults to `True`):
Use a convolution of trigram as input.
use_bottleneck (`bool`, *optional*, defaults to `True`):
Whether to use bottleneck in BERT.
intra_bottleneck_size (`int`, *optional*, defaults to 128):
Size of bottleneck layer output.
use_bottleneck_attention (`bool`, *optional*, defaults to `False`):
Whether to use attention inputs from the bottleneck transformation.
key_query_shared_bottleneck (`bool`, *optional*, defaults to `True`):
Whether to use the same linear transformation for query&key in the bottleneck.
num_feedforward_networks (`int`, *optional*, defaults to 4):
Number of FFNs in a block.
normalization_type (`str`, *optional*, defaults to `"no_norm"`):
The normalization type in MobileBERT.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import MobileBertConfig, MobileBertModel
>>> # Initializing a MobileBERT configuration
>>> configuration = MobileBertConfig()
>>> # Initializing a model (with random weights) from the configuration above
>>> model = MobileBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, vocab_size=30522, hidden_size=512, num_hidden_layers=24, num_attention_heads=4, intermediate_size=512, hidden_act='relu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, embedding_size=128, trigram_input=True, use_bottleneck=True, intra_bottleneck_size=128, use_bottleneck_attention=False, key_query_shared_bottleneck=True, num_feedforward_networks=4, normalization_type='no_norm', classifier_activation=True, classifier_dropout=None, **kwargs):
pass
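One detail worth noting in the `__init__` above is that the effective hidden size used inside the transformer blocks depends on `use_bottleneck`: when the bottleneck is enabled, `true_hidden_size` is the smaller `intra_bottleneck_size`, otherwise it falls back to `hidden_size`. A small sketch that mirrors just that selection (it does not call the class itself):
```python
# Mirrors the true_hidden_size selection in MobileBertConfig.__init__ above.
def true_hidden_size(hidden_size=512, intra_bottleneck_size=128, use_bottleneck=True):
    return intra_bottleneck_size if use_bottleneck else hidden_size

print(true_hidden_size())                      # 128: default MobileBERT, bottleneck enabled
print(true_hidden_size(use_bottleneck=False))  # 512: blocks run at the full hidden size
```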
| 2 | 1 | 57 | 3 | 54 | 0 | 2 | 1.2 | 1 | 1 | 0 | 0 | 1 | 23 | 1 | 1 | 137 | 14 | 56 | 52 | 28 | 67 | 29 | 26 | 27 | 2 | 1 | 1 | 2 |
3,856 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/configuration_mobilebert.py | transformers.models.mobilebert.configuration_mobilebert.MobileBertOnnxConfig |
from collections import OrderedDict
from ...onnx import OnnxConfig
from collections.abc import Mapping
class MobileBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
|
class MobileBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
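The `inputs` property above only varies by task: multiple-choice inputs carry an extra `choice` dimension, so the dynamic-axes mapping gains one entry. A standalone sketch of the same branching, using plain dicts and no ONNX dependency:
```python
from collections import OrderedDict

def dynamic_axes_for(task: str) -> OrderedDict:
    # Multiple-choice inputs are (batch, choice, sequence); everything else is (batch, sequence).
    if task == "multiple-choice":
        axes = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axes = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", axes), ("attention_mask", axes), ("token_type_ids", axes)]
    )

print(dynamic_axes_for("multiple-choice")["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}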
| 3 | 0 | 12 | 0 | 12 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 1 | 0 | 1 | 1 | 14 | 0 | 14 | 4 | 11 | 0 | 6 | 3 | 4 | 2 | 1 | 1 | 2 |
3,857 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.Bottleneck |
import torch
from torch import nn
class Bottleneck(nn.Module):
def __init__(self, config):
super().__init__()
self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
self.use_bottleneck_attention = config.use_bottleneck_attention
self.input = BottleneckLayer(config)
if self.key_query_shared_bottleneck:
self.attention = BottleneckLayer(config)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
bottlenecked_hidden_states = self.input(hidden_states)
if self.use_bottleneck_attention:
return (bottlenecked_hidden_states,) * 4
elif self.key_query_shared_bottleneck:
shared_attention_input = self.attention(hidden_states)
return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
else:
return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
|
class Bottleneck(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
pass
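The `forward` above returns a 4-tuple of (query, key, value, layer_input) sources, and which tensors are shared depends on two flags: with `use_bottleneck_attention` every slot is the bottlenecked tensor, and with `key_query_shared_bottleneck` query and key share a second bottleneck while value stays at full width. A quick way to see the three branches without instantiating the module:
```python
def bottleneck_routing(use_bottleneck_attention, key_query_shared_bottleneck):
    # Labels which tensor feeds each of (query, key, value, layer_input),
    # following the three branches of Bottleneck.forward above.
    if use_bottleneck_attention:
        return ("bottlenecked",) * 4
    if key_query_shared_bottleneck:
        return ("shared_attention_input", "shared_attention_input", "hidden_states", "bottlenecked")
    return ("hidden_states", "hidden_states", "hidden_states", "bottlenecked")

print(bottleneck_routing(True, True))    # all four slots come from the input bottleneck
print(bottleneck_routing(False, True))   # q/k share a bottleneck, v stays full width
print(bottleneck_routing(False, False))  # q/k/v full width, only layer_input is bottlenecked
```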
| 3 | 0 | 16 | 1 | 8 | 8 | 3 | 0.88 | 1 | 3 | 1 | 0 | 2 | 4 | 2 | 12 | 34 | 2 | 17 | 9 | 14 | 15 | 15 | 9 | 12 | 3 | 1 | 1 | 5 |
3,858 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.BottleneckLayer |
import torch
from torch import nn
class BottleneckLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
layer_input = self.dense(hidden_states)
layer_input = self.LayerNorm(layer_input)
return layer_input
|
class BottleneckLayer(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 10 | 1 | 9 | 6 | 6 | 0 | 9 | 6 | 6 | 1 | 1 | 0 | 2 |
3,859 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.FFNLayer |
from torch import nn
import torch
class FFNLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate = MobileBertIntermediate(config)
self.output = FFNOutput(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
intermediate_output = self.intermediate(hidden_states)
layer_outputs = self.output(intermediate_output, hidden_states)
return layer_outputs
|
class FFNLayer(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 4 | 2 | 0 | 2 | 2 | 2 | 12 | 10 | 1 | 9 | 7 | 6 | 0 | 9 | 7 | 6 | 1 | 1 | 0 | 2 |
3,860 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.FFNOutput |
import torch
from torch import nn
class FFNOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
layer_outputs = self.dense(hidden_states)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
|
class FFNOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 10 | 1 | 9 | 6 | 6 | 0 | 9 | 6 | 6 | 1 | 1 | 0 | 2 |
3,861 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertAttention |
from ...processing_utils import Unpack
from torch import nn
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
class MobileBertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = MobileBertSelfAttention(config)
self.output = MobileBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, layer_input: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
attention_output, attn_weights = self.self(query_tensor, key_tensor, value_tensor, attention_mask, head_mask, **kwargs)
attention_output = self.output(attention_output, layer_input)
return (attention_output, attn_weights)
|
class MobileBertAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def forward(self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, layer_input: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
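`prune_heads` above shrinks the self-attention projections by keeping only the rows belonging to the surviving heads, then updates `num_attention_heads` and `all_head_size` to match. A simplified sketch of that index bookkeeping with hypothetical sizes; unlike the real `find_pruneable_heads_and_indices`, it ignores any previously pruned heads:
```python
import torch

# Simplified index bookkeeping behind prune_heads above (hypothetical sizes).
num_attention_heads, attention_head_size = 4, 32
heads_to_prune = {1, 3}

kept_heads = [h for h in range(num_attention_heads) if h not in heads_to_prune]
# Flat indices of the rows kept in the query/key/value projection weights.
index = torch.cat(
    [torch.arange(h * attention_head_size, (h + 1) * attention_head_size) for h in kept_heads]
)

num_attention_heads -= len(heads_to_prune)                  # 4 -> 2
all_head_size = attention_head_size * num_attention_heads   # 32 * 2 = 64
print(index.shape, num_attention_heads, all_head_size)      # torch.Size([64]) 2 64
```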
| 4 | 0 | 15 | 1 | 13 | 2 | 1 | 0.13 | 1 | 6 | 2 | 0 | 3 | 3 | 3 | 13 | 48 | 4 | 40 | 20 | 27 | 5 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
3,862 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertEmbeddings |
from typing import Callable, Optional, Union
from torch import nn
import torch
class MobileBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.trigram_input = config.trigram_input
self.embedding_size = config.embedding_size
self.hidden_size = config.hidden_size
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
embed_dim_multiplier = 3 if self.trigram_input else 1
embedded_input_size = self.embedding_size * embed_dim_multiplier
self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.trigram_input:
inputs_embeds = torch.cat([nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0), inputs_embeds, nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0)], dim=2)
if self.trigram_input or self.embedding_size != self.hidden_size:
inputs_embeds = self.embedding_transformation(inputs_embeds)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class MobileBertEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
pass
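The `trigram_input` branch in `forward` above widens each token embedding to three times `embedding_size` by concatenating the embedding of the next token, the token itself, and the previous token (zero-padded at the sequence edges) before the linear `embedding_transformation`. A small standalone reproduction of that concatenation with hypothetical sizes:
```python
import torch
from torch import nn

batch, seq_len, embedding_size = 1, 4, 8          # hypothetical sizes
inputs_embeds = torch.randn(batch, seq_len, embedding_size)

# Same trigram concatenation as MobileBertEmbeddings.forward above:
# [next token (shift left), current token, previous token (shift right)] along the feature dim.
trigram = torch.cat(
    [
        nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0),
        inputs_embeds,
        nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0),
    ],
    dim=2,
)
print(trigram.shape)  # torch.Size([1, 4, 24]) -> then a Linear(3 * embedding_size, hidden_size)
```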
| 3 | 1 | 35 | 5 | 26 | 5 | 5 | 0.21 | 1 | 2 | 0 | 0 | 2 | 9 | 2 | 12 | 74 | 11 | 52 | 25 | 43 | 11 | 36 | 19 | 33 | 7 | 1 | 1 | 9 |
3,863 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertEncoder |
import torch
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
class MobileBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, head_mask[i], **kwargs)
return BaseModelOutput(last_hidden_state=hidden_states)
|
class MobileBertEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
pass
| 3 | 0 | 19 | 2 | 17 | 1 | 5 | 0.03 | 1 | 8 | 2 | 0 | 2 | 1 | 2 | 12 | 40 | 5 | 34 | 16 | 23 | 1 | 19 | 8 | 16 | 8 | 1 | 2 | 9 |
3,864 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertForMaskedLM |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...processing_utils import Unpack
from torch import nn
@auto_docstring
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
self.cls = MobileBertOnlyMLMHead(config)
self.config = config
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding:
self.cls.predictions.dense = self._get_resized_lm_head(self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True)
return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding:
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
'''
pass
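The labels contract in the docstring above (indices set to `-100` are ignored) is exactly what `CrossEntropyLoss`'s default `ignore_index` implements, so the loss is averaged only over positions with labels in `[0, vocab_size)`. A tiny standalone check with made-up logits:
```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, seq_len = 10, 4                      # hypothetical sizes
prediction_scores = torch.randn(1, seq_len, vocab_size)
labels = torch.tensor([[-100, 3, -100, 7]])      # only positions 1 and 3 contribute

loss_fct = CrossEntropyLoss()                    # ignore_index defaults to -100
masked_lm_loss = loss_fct(prediction_scores.view(-1, vocab_size), labels.view(-1))
print(masked_lm_loss)                            # mean cross-entropy over the 2 labeled tokens
```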
| 9 | 1 | 14 | 1 | 11 | 2 | 2 | 0.14 | 1 | 6 | 3 | 0 | 5 | 3 | 5 | 6 | 85 | 11 | 66 | 29 | 40 | 9 | 29 | 16 | 23 | 5 | 2 | 1 | 9 |
3,865 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertForMultipleChoice |
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
'''
pass
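The multiple-choice head above works by flattening the choice dimension before the encoder and restoring it on the logits: inputs of shape `(batch, num_choices, seq_len)` become `(batch * num_choices, seq_len)`, and the per-choice scores are reshaped back to `(batch, num_choices)` for the cross-entropy over choices. The reshaping in isolation, with dummy tensors standing in for the encoder and classifier:
```python
import torch
from torch.nn import CrossEntropyLoss

batch, num_choices, seq_len = 2, 4, 5                          # hypothetical sizes
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))        # (8, 5) is what the encoder sees
logits = torch.randn(batch * num_choices, 1)                   # classifier output: one score per choice
reshaped_logits = logits.view(-1, num_choices)                 # (2, 4)

labels = torch.tensor([1, 3])                                  # index of the correct choice per example
loss = CrossEntropyLoss()(reshaped_logits, labels)
print(flat_input_ids.shape, reshaped_logits.shape, loss.shape)
# torch.Size([8, 5]) torch.Size([2, 4]) torch.Size([])
```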
| 6 | 1 | 39 | 5 | 31 | 4 | 7 | 0.1 | 1 | 5 | 2 | 0 | 2 | 3 | 2 | 3 | 87 | 10 | 70 | 30 | 47 | 7 | 29 | 15 | 26 | 11 | 2 | 1 | 13 |
3,866 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertForNextSentencePrediction |
import torch
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import warnings
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring(custom_intro='\n MobileBert Model with a `next sentence prediction (classification)` head on top.\n ')
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
self.cls = MobileBertOnlyNSPHead(config)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, NextSentencePredictorOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`.
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Examples:
```python
>>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
>>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
if 'next_sentence_label' in kwargs:
warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning)
labels = kwargs.pop('next_sentence_label')
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n MobileBert Model with a `next sentence prediction (classification)` head on top.\n ')
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, NextSentencePredictorOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`.
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Examples:
```python
>>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
>>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 6 | 1 | 45 | 8 | 27 | 11 | 4 | 0.38 | 1 | 6 | 3 | 0 | 2 | 2 | 2 | 3 | 94 | 17 | 56 | 25 | 38 | 21 | 22 | 11 | 19 | 6 | 2 | 1 | 7 |
3,867 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTraining |
from ...processing_utils import Unpack
from torch import nn
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring(custom_intro='\n MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `next sentence prediction (classification)` head.\n ')
class MobileBertForPreTraining(MobileBertPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
self.cls = MobileBertPreTrainingHeads(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding:
self.cls.predictions.dense = self._get_resized_lm_head(self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True)
return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, next_sentence_label: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MobileBertForPreTrainingOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Examples:
```python
>>> from transformers import AutoTokenizer, MobileBertForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
>>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
>>> # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```"""
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return MobileBertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `next sentence prediction (classification)` head.\n ')
class MobileBertForPreTraining(MobileBertPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding:
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, next_sentence_label: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MobileBertForPreTrainingOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Examples:
```python
>>> from transformers import AutoTokenizer, MobileBertForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
>>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
>>> # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```'''
pass
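As the code above shows, the pretraining objective is simply the sum of two cross-entropies: a masked-LM loss over the vocabulary and a binary next-sentence loss over the pooled output. A compact numeric sketch of that sum with dummy logits (hypothetical sizes):
```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, seq_len = 10, 4                                    # hypothetical sizes
prediction_scores = torch.randn(1, seq_len, vocab_size)        # stands in for the MLM head output
seq_relationship_score = torch.randn(1, 2)                     # stands in for the NSP head output
labels = torch.tensor([[-100, 3, 5, -100]])                    # MLM labels; -100 positions are ignored
next_sentence_label = torch.tensor([0])                        # 0 = sequence B continues sequence A

loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss               # same sum as in forward above
print(total_loss)
```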
| 9 | 1 | 19 | 3 | 12 | 5 | 2 | 0.4 | 1 | 5 | 3 | 0 | 5 | 2 | 5 | 6 | 106 | 18 | 63 | 31 | 42 | 25 | 30 | 17 | 24 | 5 | 2 | 1 | 9 |
3,868 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from dataclasses import dataclass
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`MobileBertForPreTraining`].\n ')
class MobileBertForPreTrainingOutput(ModelOutput):
"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
seq_relationship_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`MobileBertForPreTraining`].\n ')
class MobileBertForPreTrainingOutput(ModelOutput):
'''
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 4 | 6 | 6 | 5 | 21 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
3,869 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py | transformers.models.mobilebert.modeling_mobilebert.MobileBertForQuestionAnswering |
from typing import Callable, Optional, Union
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils.generic import can_return_tuple, check_model_inputs
from ...processing_utils import Unpack
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
pass
| 6
| 0
| 41
| 5
| 30
| 7
| 4
| 0.18
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 94
| 10
| 71
| 30
| 45
| 13
| 32
| 16
| 29
| 7
| 2
| 2
| 8
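As an illustrative aside, here is a minimal extractive-QA sketch for the `MobileBertForQuestionAnswering` head documented in the record above. The base `google/mobilebert-uncased` checkpoint leaves `qa_outputs` randomly initialized, so the decoded span is meaningless until the head is fine-tuned; the question and context strings are assumptions made up for this example.

```python
import torch
from transformers import AutoTokenizer, MobileBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForQuestionAnswering.from_pretrained("google/mobilebert-uncased")

question = "Who proposed MobileBERT?"
context = "MobileBERT is a compact task-agnostic BERT variant proposed for resource-limited devices."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Greedily pick the highest-scoring start/end positions and decode that token span.
start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```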
|
3,870
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertForSequenceClassification
|
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...processing_utils import Unpack
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
from torch import nn
@auto_docstring(custom_intro='\n MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.mobilebert = MobileBertModel(config)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6
| 1
| 42
| 4
| 35
| 4
| 7
| 0.09
| 1
| 6
| 2
| 0
| 2
| 5
| 2
| 3
| 94
| 9
| 78
| 28
| 55
| 7
| 36
| 15
| 33
| 12
| 2
| 3
| 14
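A hedged usage sketch for the sequence-classification head above, exercising the `single_label_classification` branch of its loss selection: integer labels of dtype long with `num_labels > 1` route to `CrossEntropyLoss`. The `num_labels=2`, the input sentence, and the label value are assumptions for illustration, and the classifier is freshly initialized when loaded from the base checkpoint.

```python
import torch
from transformers import AutoTokenizer, MobileBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForSequenceClassification.from_pretrained(
    "google/mobilebert-uncased", num_labels=2
)

inputs = tokenizer("This movie was surprisingly good!", return_tensors="pt")
labels = torch.tensor([1])  # shape (batch_size,), long dtype -> cross-entropy branch

outputs = model(**inputs, labels=labels)
print(outputs.loss)          # scalar cross-entropy loss
print(outputs.logits.shape)  # torch.Size([1, 2])
```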
|
3,871
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertForTokenClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...utils.generic import can_return_tuple, check_model_inputs
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
import torch
@auto_docstring
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 6
| 1
| 32
| 4
| 26
| 3
| 4
| 0.08
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 3
| 74
| 9
| 60
| 27
| 37
| 5
| 23
| 14
| 20
| 5
| 2
| 1
| 7
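Similarly, a minimal sketch for the token-classification head above; `num_labels=5` and the sentence are illustrative assumptions, and the per-token classifier is untrained when loaded from the base checkpoint.

```python
import torch
from transformers import AutoTokenizer, MobileBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForTokenClassification.from_pretrained(
    "google/mobilebert-uncased", num_labels=5
)

inputs = tokenizer("Hugging Face is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)

print(logits.argmax(dim=-1))  # one predicted label id per token
```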
|
3,872
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertIntermediate
|
from torch import nn
from ...activations import ACT2FN
import torch
class MobileBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class MobileBertIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
3,873
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertLMPredictionHead
|
import torch
from torch import nn
class MobileBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = MobileBertPredictionHeadTransform(config)
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def _tie_weights(self) -> None:
self.decoder.bias = self.bias
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.transform(hidden_states)
hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
hidden_states += self.decoder.bias
return hidden_states
|
class MobileBertLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def _tie_weights(self) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 4
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 3
| 1
| 0
| 3
| 4
| 3
| 13
| 20
| 2
| 15
| 8
| 11
| 3
| 15
| 8
| 11
| 1
| 1
| 0
| 3
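A shape-only sketch of how the prediction head above assembles its output projection: the first `embedding_size` rows come from the transposed `decoder` weight (typically tied to the word-embedding matrix), and the remaining `hidden_size - embedding_size` rows come from the extra `dense` layer. The sizes used here (512/128/30522) are assumed, roughly MobileBERT's usual defaults.

```python
import torch

hidden_size, embedding_size, vocab_size = 512, 128, 30522

decoder_weight = torch.randn(vocab_size, embedding_size)              # (30522, 128), usually tied
dense_weight = torch.randn(hidden_size - embedding_size, vocab_size)  # (384, 30522), extra rows

projection = torch.cat([decoder_weight.t(), dense_weight], dim=0)
print(projection.shape)  # torch.Size([512, 30522])

hidden_states = torch.randn(1, 10, hidden_size)  # (batch, seq_len, hidden_size)
logits = hidden_states.matmul(projection)        # (1, 10, vocab_size)
print(logits.shape)
```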
|
3,874
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertLayer
|
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
class MobileBertLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.num_feedforward_networks = config.num_feedforward_networks
self.attention = MobileBertAttention(config)
self.intermediate = MobileBertIntermediate(config)
self.output = MobileBertOutput(config)
if self.use_bottleneck:
self.bottleneck = Bottleneck(config)
if config.num_feedforward_networks > 1:
self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
if self.use_bottleneck:
query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
else:
query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
self_attention_output, _ = self.attention(query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, **kwargs)
attention_output = self_attention_output
if self.num_feedforward_networks != 1:
for ffn_module in self.ffn:
attention_output = ffn_module(attention_output)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output, hidden_states)
return layer_output
|
class MobileBertLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 30
| 2
| 28
| 1
| 4
| 0.02
| 1
| 10
| 5
| 0
| 2
| 7
| 2
| 12
| 61
| 5
| 56
| 24
| 47
| 1
| 28
| 18
| 25
| 4
| 1
| 2
| 7
|
3,875
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertModel
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
import torch
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
@auto_docstring
class MobileBertModel(MobileBertPreTrainedModel):
"""
https://huggingface.co/papers/2004.02984
"""
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = MobileBertEmbeddings(config)
self.encoder = MobileBertEncoder(config)
self.pooler = MobileBertPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
attention_mask = self._update_full_mask(attention_mask, embedding_output)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, **kwargs)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
|
@auto_docstring
class MobileBertModel(MobileBertPreTrainedModel):
'''
https://huggingface.co/papers/2004.02984
'''
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
| 10
| 3
| 18
| 2
| 14
| 2
| 4
| 0.2
| 1
| 8
| 4
| 0
| 5
| 4
| 5
| 6
| 105
| 15
| 75
| 30
| 52
| 15
| 39
| 18
| 33
| 12
| 2
| 1
| 18
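A short sketch running the bare `MobileBertModel` encoder from the record above. The input sentence mirrors the pretraining example earlier in this file; the checkpoint and shapes are otherwise as reported by the model itself.

```python
import torch
from transformers import AutoTokenizer, MobileBertModel

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertModel.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size), from the pooling layer
```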
|
3,876
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertOnlyMLMHead
|
from torch import nn
import torch
class MobileBertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MobileBertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class MobileBertOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
3,877
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertOnlyNSPHead
|
from torch import nn
import torch
class MobileBertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
|
class MobileBertOnlyNSPHead(nn.Module):
def __init__(self, config):
pass
def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
3,878
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertOutput
|
from torch import nn
import torch
class MobileBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
if not self.use_bottleneck:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
else:
self.bottleneck = OutputBottleneck(config)
def forward(self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor) -> torch.Tensor:
layer_output = self.dense(intermediate_states)
if not self.use_bottleneck:
layer_output = self.dropout(layer_output)
layer_output = self.LayerNorm(layer_output + residual_tensor_1)
else:
layer_output = self.LayerNorm(layer_output + residual_tensor_1)
layer_output = self.bottleneck(layer_output, residual_tensor_2)
return layer_output
|
class MobileBertOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 2
| 0
| 1
| 3
| 1
| 0
| 2
| 5
| 2
| 12
| 22
| 1
| 21
| 11
| 16
| 0
| 17
| 9
| 14
| 2
| 1
| 1
| 4
|
3,879
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertPooler
|
from torch import nn
import torch
class MobileBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.do_activate = config.classifier_activation
if self.do_activate:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
if not self.do_activate:
return first_token_tensor
else:
pooled_output = self.dense(first_token_tensor)
pooled_output = torch.tanh(pooled_output)
return pooled_output
|
class MobileBertPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 0
| 7
| 1
| 2
| 0.14
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 17
| 1
| 14
| 7
| 11
| 2
| 13
| 7
| 10
| 2
| 1
| 1
| 4
|
3,880
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from .configuration_mobilebert import MobileBertConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring
class MobileBertPreTrainedModel(PreTrainedModel):
config: MobileBertConfig
base_model_prefix = 'mobilebert'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': MobileBertLayer, 'attentions': MobileBertSelfAttention}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (nn.LayerNorm, NoNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, MobileBertLMPredictionHead):
module.bias.data.zero_()
|
@auto_docstring
class MobileBertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.44
| 1
| 1
| 1
| 8
| 1
| 0
| 1
| 1
| 25
| 2
| 16
| 5
| 14
| 7
| 14
| 5
| 12
| 6
| 1
| 2
| 6
|
3,881
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertPreTrainingHeads
|
from torch import nn
import torch
class MobileBertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MobileBertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> tuple[torch.Tensor]:
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return (prediction_scores, seq_relationship_score)
|
class MobileBertPreTrainingHeads(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 7
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
3,882
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertPredictionHeadTransform
|
from torch import nn
import torch
from ...activations import ACT2FN
class MobileBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = NORM2FN['layer_norm'](config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class MobileBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 15
| 1
| 14
| 6
| 11
| 0
| 13
| 6
| 10
| 2
| 1
| 1
| 3
|
3,883
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertSelfAttention
|
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...processing_utils import Unpack
class MobileBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size ** (-0.5)
self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
self.value = nn.Linear(config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_causal = False
def forward(self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
input_shape = query_tensor.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(query_tensor).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(key_tensor).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(value_tensor).view(*hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, head_mask=head_mask, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return (attn_output, attn_weights)
|
class MobileBertSelfAttention(nn.Module):
def __init__(self, config):
pass
def forward(self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 18
| 1
| 15
| 2
| 2
| 0.13
| 1
| 4
| 0
| 0
| 3
| 7
| 3
| 13
| 56
| 5
| 45
| 31
| 33
| 6
| 35
| 23
| 31
| 4
| 1
| 1
| 7
|
3,884
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.MobileBertSelfOutput
|
import torch
from torch import nn
class MobileBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
if not self.use_bottleneck:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
layer_outputs = self.dense(hidden_states)
if not self.use_bottleneck:
layer_outputs = self.dropout(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
|
class MobileBertSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 15
| 1
| 14
| 8
| 11
| 0
| 14
| 8
| 11
| 2
| 1
| 1
| 4
|
3,885
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.NoNorm
|
import torch
from torch import nn
class NoNorm(nn.Module):
def __init__(self, feat_size, eps=None):
super().__init__()
self.bias = nn.Parameter(torch.zeros(feat_size))
self.weight = nn.Parameter(torch.ones(feat_size))
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
return input_tensor * self.weight + self.bias
|
class NoNorm(nn.Module):
def __init__(self, feat_size, eps=None):
pass
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
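A tiny sketch of what `NoNorm` above computes: a per-feature scale and shift with no statistics at all, so with its initial parameters (weights of ones, bias of zeros) it is the identity map. The tensor sizes are arbitrary.

```python
import torch

feat_size = 4
weight = torch.ones(feat_size)   # NoNorm's initial weight
bias = torch.zeros(feat_size)    # NoNorm's initial bias

x = torch.randn(2, 3, feat_size)
out = x * weight + bias          # same expression as NoNorm.forward
print(torch.equal(out, x))       # True: no normalization is applied
```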
|
3,886
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/modeling_mobilebert.py
|
transformers.models.mobilebert.modeling_mobilebert.OutputBottleneck
|
import torch
from torch import nn
class OutputBottleneck(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
layer_outputs = self.dense(hidden_states)
layer_outputs = self.dropout(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
|
class OutputBottleneck(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 7
| 8
| 0
| 11
| 7
| 8
| 1
| 1
| 0
| 2
|
3,887
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/tokenization_mobilebert.py
|
transformers.models.mobilebert.tokenization_mobilebert.MobileBertTokenizer
|
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
import collections
from typing import Optional
import os
class MobileBertTokenizer(PreTrainedTokenizer):
"""
Construct a MobileBERT tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original MobileBERT).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
extra spaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
if not os.path.isfile(vocab_file):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text, split_special_tokens=False):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None):
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A MobileBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(token + '\n')
index += 1
return (vocab_file,)
|
class MobileBertTokenizer(PreTrainedTokenizer):
'''
Construct a MobileBERT tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original MobileBERT).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
extra spaces.
'''
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
pass
@property
def do_lower_case(self):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text, split_special_tokens=False):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings) into a single string.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A MobileBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 14
| 6
| 15
| 1
| 10
| 4
| 2
| 0.72
| 1
| 9
| 2
| 0
| 12
| 5
| 12
| 101
| 236
| 29
| 121
| 53
| 85
| 87
| 65
| 29
| 52
| 6
| 3
| 3
| 27
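A hedged usage sketch for the slow (pure-Python) tokenizer documented above, showing `build_inputs_with_special_tokens` and `get_special_tokens_mask` on a single sequence; the example sentence is an assumption.

```python
from transformers import MobileBertTokenizer

tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")

tokens = tokenizer.tokenize("Hello, my dog is cute")
ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(tokens))

print(tokenizer.convert_ids_to_tokens(ids))  # ['[CLS]', 'hello', ',', ..., '[SEP]']
print(tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True))
# -> 1 for [CLS]/[SEP], 0 for ordinary wordpieces
```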
|
3,888
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py
|
transformers.models.mobilebert.tokenization_mobilebert_fast.MobileBertTokenizerFast
|
from tokenizers import normalizers
from typing import Optional
from .tokenization_mobilebert import MobileBertTokenizer
import json
from ...tokenization_utils_fast import PreTrainedTokenizerFast
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original MobileBERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = MobileBertTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars:
normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A MobileBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
|
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original MobileBERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A MobileBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 4
| 2
| 24
| 3
| 14
| 7
| 2
| 1.12
| 1
| 4
| 0
| 0
| 4
| 1
| 4
| 92
| 141
| 18
| 58
| 29
| 38
| 65
| 27
| 14
| 22
| 2
| 3
| 1
| 7
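For contrast, a brief sketch with the fast (Rust-backed) tokenizer above; offset mappings are a fast-tokenizer-only feature, and the input sentence is again an assumption.

```python
from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")

enc = tokenizer("Hello, my dog is cute", return_offsets_mapping=True)
print(enc["input_ids"])       # includes the [CLS]/[SEP] ids added by the post-processor
print(enc["offset_mapping"])  # (start, end) character spans; (0, 0) for special tokens
```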
|
3,889
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py
|
transformers.models.mobilenet_v1.configuration_mobilenet_v1.MobileNetV1Config
|
from ...configuration_utils import PretrainedConfig
class MobileNetV1Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a
MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileNetV1
[google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
depth_multiplier (`float`, *optional*, defaults to 1.0):
Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
channels. This is sometimes also called "alpha" or "width multiplier".
min_depth (`int`, *optional*, defaults to 8):
All layers will have at least this many channels.
hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
tf_padding (`bool`, *optional*, defaults to `True`):
Whether to use TensorFlow padding rules on the convolution layers.
classifier_dropout_prob (`float`, *optional*, defaults to 0.999):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 0.001):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import MobileNetV1Config, MobileNetV1Model
>>> # Initializing a "mobilenet_v1_1.0_224" style configuration
>>> configuration = MobileNetV1Config()
>>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration
>>> model = MobileNetV1Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mobilenet_v1'
def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act='relu6', tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
super().__init__(**kwargs)
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.')
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.min_depth = min_depth
self.hidden_act = hidden_act
self.tf_padding = tf_padding
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
|
class MobileNetV1Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a
MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileNetV1
[google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
depth_multiplier (`float`, *optional*, defaults to 1.0):
Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
channels. This is sometimes also called "alpha" or "width multiplier".
min_depth (`int`, *optional*, defaults to 8):
All layers will have at least this many channels.
hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
tf_padding (`bool`, *optional*, defaults to `True`):
Whether to use TensorFlow padding rules on the convolution layers.
classifier_dropout_prob (`float`, *optional*, defaults to 0.999):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 0.001):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import MobileNetV1Config, MobileNetV1Model
>>> # Initializing a "mobilenet_v1_1.0_224" style configuration
>>> configuration = MobileNetV1Config()
>>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration
>>> model = MobileNetV1Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act='relu6', tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
pass
| 2
| 1
| 27
| 2
| 25
| 0
| 2
| 1.37
| 1
| 2
| 0
| 0
| 1
| 9
| 1
| 1
| 75
| 11
| 27
| 24
| 13
| 37
| 15
| 12
| 13
| 2
| 1
| 1
| 2
|
3,890
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py
|
transformers.models.mobilenet_v1.configuration_mobilenet_v1.MobileNetV1OnnxConfig
|
from collections import OrderedDict
from ...onnx import OnnxConfig
from packaging import version
from collections.abc import Mapping
class MobileNetV1OnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch'})])
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'image-classification':
return OrderedDict([('logits', {0: 'batch'})])
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
|
class MobileNetV1OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 7
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 3
| 0
| 3
| 3
| 17
| 3
| 14
| 8
| 7
| 0
| 10
| 5
| 6
| 2
| 1
| 1
| 4
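The `inputs`/`outputs` properties of `MobileNetV1OnnxConfig` above only declare which axes are dynamic for ONNX export. The following standalone sketch mirrors that mapping logic in plain Python, without importing transformers, purely to show the shape of the returned dictionaries.

```python
from collections import OrderedDict


def onnx_axes(task: str):
    # every tensor gets a dynamic batch axis (axis 0), as in the properties above
    inputs = OrderedDict([("pixel_values", {0: "batch"})])
    if task == "image-classification":
        outputs = OrderedDict([("logits", {0: "batch"})])
    else:
        outputs = OrderedDict(
            [("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})]
        )
    return inputs, outputs


print(onnx_axes("image-classification"))
print(onnx_axes("default"))
```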
|
3,891
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py
|
transformers.models.mobilenet_v1.feature_extraction_mobilenet_v1.MobileNetV1FeatureExtractor
|
from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor
from ...utils.import_utils import requires
import warnings
@requires(backends=('vision',))
class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class MobileNetV1FeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use MobileNetV1ImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 24
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
3,892
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py
|
transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor
|
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
import numpy as np
from ...utils.import_utils import requires
from typing import Optional, Union
from ...image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
@requires(backends=('vision',))
class MobileNetV1ImageProcessor(BaseImageProcessor):
"""
Constructs a MobileNetV1 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 256}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if 'shortest_edge' in size:
size = size['shortest_edge']
default_to_square = False
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the [0, 1] range.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
all_images.append(image)
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
|
@requires(backends=('vision',))
class MobileNetV1ImageProcessor(BaseImageProcessor):
'''
Constructs a MobileNetV1 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the [0, 1] range.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 6
| 3
| 71
| 5
| 45
| 21
| 9
| 0.74
| 1
| 8
| 2
| 1
| 3
| 10
| 3
| 23
| 257
| 19
| 137
| 57
| 95
| 101
| 63
| 19
| 59
| 19
| 3
| 2
| 27
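The `preprocess` pipeline above chains resize, center crop, rescale and normalize. Below is a minimal NumPy-only sketch of the rescale and normalize steps on a dummy image; the 0.5 per-channel mean/std are stand-ins for `IMAGENET_STANDARD_MEAN`/`IMAGENET_STANDARD_STD`, and resizing/cropping are omitted for brevity.

```python
import numpy as np

# dummy HWC uint8 image standing in for a decoded PIL image
image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)

rescaled = image.astype(np.float32) * (1 / 255)      # do_rescale step
mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)   # stand-in for IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5], dtype=np.float32)    # stand-in for IMAGENET_STANDARD_STD
normalized = (rescaled - mean) / std                 # do_normalize step

pixel_values = normalized.transpose(2, 0, 1)[None]   # ChannelDimension.FIRST, batched
print(pixel_values.shape)                            # (1, 3, 224, 224)
```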
|
3,893
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
|
transformers.models.mobilenet_v1.modeling_mobilenet_v1.MobileNetV1ConvLayer
|
import torch
from ...activations import ACT2FN
from .configuration_mobilenet_v1 import MobileNetV1Config
from typing import Optional, Union
from torch import nn
class MobileNetV1ConvLayer(nn.Module):
def __init__(self, config: MobileNetV1Config, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int]=1, groups: Optional[int]=1, bias: bool=False, use_normalization: Optional[bool]=True, use_activation: Optional[Union[bool, str]]=True) -> None:
super().__init__()
self.config = config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')
padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
self.convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode='zeros')
if use_normalization:
self.normalization = nn.BatchNorm2d(num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.config.tf_padding:
features = apply_tf_padding(features, self.convolution)
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
|
class MobileNetV1ConvLayer(nn.Module):
def __init__(self, config: MobileNetV1Config, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int]=1, groups: Optional[int]=1, bias: bool=False, use_normalization: Optional[bool]=True, use_activation: Optional[Union[bool, str]]=True) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 31
| 3
| 29
| 0
| 6
| 0
| 1
| 7
| 1
| 0
| 2
| 4
| 2
| 12
| 64
| 6
| 58
| 19
| 44
| 0
| 28
| 8
| 25
| 8
| 1
| 2
| 12
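`MobileNetV1ConvLayer` above composes a convolution, an optional batch norm and an optional activation. The sketch below rebuilds that composition with plain `torch.nn` modules, using the explicit `(kernel_size - 1) // 2` padding of the non-`tf_padding` branch; the channel counts and shapes are illustrative only.

```python
import torch
from torch import nn

kernel_size, stride = 3, 2
padding = (kernel_size - 1) // 2  # explicit padding used when tf_padding is False

block = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size, stride, padding, bias=False),
    nn.BatchNorm2d(32, eps=0.001, momentum=0.9997),
    nn.ReLU6(),  # the "relu6" hidden_act default
)

features = torch.randn(1, 3, 224, 224)
print(block(features).shape)  # torch.Size([1, 32, 112, 112])
```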
|
3,894
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
|
transformers.models.mobilenet_v1.modeling_mobilenet_v1.MobileNetV1ForImageClassification
|
from .configuration_mobilenet_v1 import MobileNetV1Config
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...utils import auto_docstring, logging
from torch import nn
@auto_docstring(custom_intro='\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
def __init__(self, config: MobileNetV1Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilenet_v1 = MobileNetV1Model(config)
last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels
self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(self.dropout(pooled_output))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
def __init__(self, config: MobileNetV1Config) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 34
| 6
| 24
| 4
| 8
| 0.14
| 1
| 8
| 3
| 0
| 2
| 4
| 2
| 3
| 76
| 12
| 56
| 21
| 40
| 8
| 34
| 14
| 31
| 13
| 2
| 3
| 15
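A quick way to exercise the classification head above without downloading weights is to instantiate it from a fresh config, as the docstring examples elsewhere in this table do for the base model. The sketch below assumes `torch` and `transformers` are installed and runs random, untrained weights, so the logits are meaningless.

```python
import torch
from transformers import MobileNetV1Config, MobileNetV1ForImageClassification

config = MobileNetV1Config(num_labels=10)          # small, randomly initialized model
model = MobileNetV1ForImageClassification(config)
model.eval()

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.logits.shape)  # torch.Size([1, 10])
```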
|
3,895
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
|
transformers.models.mobilenet_v1.modeling_mobilenet_v1.MobileNetV1Model
|
from .configuration_mobilenet_v1 import MobileNetV1Config
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
import torch
from ...utils import auto_docstring, logging
from torch import nn
@auto_docstring
class MobileNetV1Model(MobileNetV1PreTrainedModel):
def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
depth = 32
out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
self.conv_stem = MobileNetV1ConvLayer(config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2)
strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
self.layer = nn.ModuleList()
for i in range(13):
in_channels = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
self.layer.append(MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels))
self.layer.append(MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1))
self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
self.post_init()
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.conv_stem(pixel_values)
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
last_hidden_state = hidden_states
if self.pooler is not None:
pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
else:
pooled_output = None
if not return_dict:
return tuple((v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None))
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states)
|
@auto_docstring
class MobileNetV1Model(MobileNetV1PreTrainedModel):
def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def _prune_heads(self, heads_to_prune):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
pass
| 6
| 1
| 30
| 6
| 24
| 0
| 5
| 0.01
| 1
| 12
| 3
| 0
| 3
| 4
| 3
| 4
| 101
| 20
| 80
| 24
| 63
| 1
| 38
| 18
| 34
| 9
| 2
| 2
| 14
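The constructor above doubles `depth` whenever a layer has stride 2 (or is the first layer) and clamps every width with `min_depth`. This pure-Python sketch reproduces the resulting channel schedule for the default `depth_multiplier=1.0` and `min_depth=8`.

```python
depth_multiplier, min_depth = 1.0, 8
strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

depth = 32
out_channels = max(int(depth * depth_multiplier), min_depth)  # stem output width
schedule = [out_channels]
for i in range(13):
    if strides[i] == 2 or i == 0:
        depth *= 2
        out_channels = max(int(depth * depth_multiplier), min_depth)
    schedule.append(out_channels)

print(schedule)
# [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]
```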
|
3,896
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
|
transformers.models.mobilenet_v1.modeling_mobilenet_v1.MobileNetV1PreTrainedModel
|
from ...modeling_utils import PreTrainedModel
from torch import nn
from ...utils import auto_docstring, logging
from typing import Optional, Union
from .configuration_mobilenet_v1 import MobileNetV1Config
@auto_docstring
class MobileNetV1PreTrainedModel(PreTrainedModel):
config: MobileNetV1Config
base_model_prefix = 'mobilenet_v1'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
_no_split_modules = []
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class MobileNetV1PreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 9
| 0
| 8
| 1
| 4
| 0.33
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 22
| 2
| 15
| 8
| 13
| 5
| 14
| 8
| 12
| 4
| 1
| 2
| 4
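`_init_weights` above draws Linear/Conv2d weights from a normal distribution with std `initializer_range`, zeroes biases, and resets BatchNorm to an identity affine transform. The sketch below applies the same rule to standalone torch modules, outside of any transformers class.

```python
import torch
from torch import nn

initializer_range = 0.02  # config default shown earlier

linear = nn.Linear(8, 4)
linear.weight.data.normal_(mean=0.0, std=initializer_range)
linear.bias.data.zero_()

bn = nn.BatchNorm2d(4)
bn.weight.data.fill_(1.0)   # BatchNorm scale -> 1
bn.bias.data.zero_()        # BatchNorm shift -> 0

print(round(linear.weight.std().item(), 3), bn.weight.unique())
```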
|
3,897
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py
|
transformers.models.mobilenet_v2.configuration_mobilenet_v2.MobileNetV2Config
|
from ...configuration_utils import PretrainedConfig
class MobileNetV2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MobileNetV2Model`]. It is used to instantiate a
MobileNetV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileNetV2
[google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
depth_multiplier (`float`, *optional*, defaults to 1.0):
Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
channels. This is sometimes also called "alpha" or "width multiplier".
depth_divisible_by (`int`, *optional*, defaults to 8):
The number of channels in each layer will always be a multiple of this number.
min_depth (`int`, *optional*, defaults to 8):
All layers will have at least this many channels.
expand_ratio (`float`, *optional*, defaults to 6.0):
The number of output channels of the first layer in each block is input channels times expansion ratio.
output_stride (`int`, *optional*, defaults to 32):
The ratio between the spatial resolution of the input and output feature maps. By default the model reduces
the input dimensions by a factor of 32. If `output_stride` is 8 or 16, the model uses dilated convolutions
on the depthwise layers instead of regular convolutions, so that the feature maps never become more than 8x
or 16x smaller than the input image.
first_layer_is_expansion (`bool`, *optional*, defaults to `True`):
True if the very first convolution layer is also the expansion layer for the first expansion block.
finegrained_output (`bool`, *optional*, defaults to `True`):
If true, the number of output channels in the final convolution layer will stay large (1280) even if
`depth_multiplier` is less than 1.
hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
tf_padding (`bool`, *optional*, defaults to `True`):
Whether to use TensorFlow padding rules on the convolution layers.
classifier_dropout_prob (`float`, *optional*, defaults to 0.8):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 0.001):
The epsilon used by the layer normalization layers.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import MobileNetV2Config, MobileNetV2Model
>>> # Initializing a "mobilenet_v2_1.0_224" style configuration
>>> configuration = MobileNetV2Config()
>>> # Initializing a model from the "mobilenet_v2_1.0_224" style configuration
>>> model = MobileNetV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mobilenet_v2'
def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6.0, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act='relu6', tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
super().__init__(**kwargs)
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.')
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.depth_divisible_by = depth_divisible_by
self.min_depth = min_depth
self.expand_ratio = expand_ratio
self.output_stride = output_stride
self.first_layer_is_expansion = first_layer_is_expansion
self.finegrained_output = finegrained_output
self.hidden_act = hidden_act
self.tf_padding = tf_padding
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.semantic_loss_ignore_index = semantic_loss_ignore_index
|
class MobileNetV2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MobileNetV2Model`]. It is used to instantiate a
MobileNetV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileNetV2
[google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
depth_multiplier (`float`, *optional*, defaults to 1.0):
Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
channels. This is sometimes also called "alpha" or "width multiplier".
depth_divisible_by (`int`, *optional*, defaults to 8):
The number of channels in each layer will always be a multiple of this number.
min_depth (`int`, *optional*, defaults to 8):
All layers will have at least this many channels.
expand_ratio (`float`, *optional*, defaults to 6.0):
The number of output channels of the first layer in each block is input channels times expansion ratio.
output_stride (`int`, *optional*, defaults to 32):
The ratio between the spatial resolution of the input and output feature maps. By default the model reduces
the input dimensions by a factor of 32. If `output_stride` is 8 or 16, the model uses dilated convolutions
on the depthwise layers instead of regular convolutions, so that the feature maps never become more than 8x
or 16x smaller than the input image.
first_layer_is_expansion (`bool`, *optional*, defaults to `True`):
True if the very first convolution layer is also the expansion layer for the first expansion block.
finegrained_output (`bool`, *optional*, defaults to `True`):
If true, the number of output channels in the final convolution layer will stay large (1280) even if
`depth_multiplier` is less than 1.
hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
tf_padding (`bool`, *optional*, defaults to `True`):
Whether to use TensorFlow padding rules on the convolution layers.
classifier_dropout_prob (`float`, *optional*, defaults to 0.8):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 0.001):
The epsilon used by the layer normalization layers.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import MobileNetV2Config, MobileNetV2Model
>>> # Initializing a "mobilenet_v2_1.0_224" style configuration
>>> configuration = MobileNetV2Config()
>>> # Initializing a model from the "mobilenet_v2_1.0_224" style configuration
>>> model = MobileNetV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6.0, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act='relu6', tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
pass
| 2
| 1
| 39
| 2
| 37
| 0
| 2
| 1.36
| 1
| 2
| 0
| 0
| 1
| 15
| 1
| 1
| 103
| 11
| 39
| 36
| 19
| 53
| 21
| 18
| 19
| 2
| 1
| 1
| 2
|
3,898
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py
|
transformers.models.mobilenet_v2.configuration_mobilenet_v2.MobileNetV2OnnxConfig
|
from ...onnx import OnnxConfig
from collections.abc import Mapping
from collections import OrderedDict
from packaging import version
class MobileNetV2OnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch'})])
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'image-classification':
return OrderedDict([('logits', {0: 'batch'})])
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
|
class MobileNetV2OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 7
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 3
| 0
| 3
| 3
| 17
| 3
| 14
| 8
| 7
| 0
| 10
| 5
| 6
| 2
| 1
| 1
| 4
|
3,899
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py
|
transformers.models.mobilenet_v2.feature_extraction_mobilenet_v2.MobileNetV2FeatureExtractor
|
import warnings
from ...utils.import_utils import requires
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
@requires(backends=('vision',))
class MobileNetV2FeatureExtractor(MobileNetV2ImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class MobileNetV2FeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use MobileNetV2ImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class MobileNetV2FeatureExtractor(MobileNetV2ImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 25
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|