Dataset columns (min/max are observed values; for string columns they are string lengths; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, nullable ⌀) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

id: 3,000
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/configuration_ibert.py
class_name: transformers.models.ibert.configuration_ibert.IBertConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
class IBertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`IBertModel`]. It is used to instantiate a I-BERT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the IBERT
[kssteven/ibert-roberta-base](https://huggingface.co/kssteven/ibert-roberta-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`IBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`IBertModel`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
quant_mode (`bool`, *optional*, defaults to `False`):
Whether to quantize the model or not.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize specific nonlinear layer. Dequantized layers are then executed with full precision.
`"none"`, `"gelu"`, `"softmax"`, `"layernorm"` and `"nonlinear"` are supported. As default, it is set as
`"none"`, which does not dequantize any layers. Please specify `"gelu"`, `"softmax"`, or `"layernorm"` to
dequantize GELU, Softmax, or LayerNorm, respectively. `"nonlinear"` will dequantize all nonlinear layers,
i.e., GELU, Softmax, and LayerNorm.
"""
model_type = 'ibert'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', quant_mode=False, force_dequant='none', **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.quant_mode = quant_mode
self.force_dequant = force_dequant

class_skeleton:
class IBertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`IBertModel`]. It is used to instantiate a I-BERT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the IBERT
[kssteven/ibert-roberta-base](https://huggingface.co/kssteven/ibert-roberta-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`IBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`IBertModel`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
quant_mode (`bool`, *optional*, defaults to `False`):
Whether to quantize the model or not.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize specific nonlinear layer. Dequantized layers are then executed with full precision.
`"none"`, `"gelu"`, `"softmax"`, `"layernorm"` and `"nonlinear"` are supported. As default, it is set as
`"none"`, which does not dequantize any layers. Please specify `"gelu"`, `"softmax"`, or `"layernorm"` to
dequantize GELU, Softmax, or LayerNorm, respectively. `"nonlinear"` will dequantize all nonlinear layers,
i.e., GELU, Softmax, and LayerNorm.
'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', quant_mode=False, force_dequant='none', **kwargs):
pass

metrics: total_program_units=2, total_doc_str=1, AvgCountLine=39, AvgCountLineBlank=1, AvgCountLineCode=38, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.25, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=15, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=95, CountLineBlank=5, CountLineCode=40, CountLineCodeDecl=39, CountLineCodeExe=17, CountLineComment=50, CountStmt=19, CountStmtDecl=18, CountStmtExe=17, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
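
The configuration docstring above maps one-to-one onto constructor arguments. A minimal usage sketch, assuming a `transformers` installation that exports `IBertConfig` and `IBertModel` at the top level:

```python
from transformers import IBertConfig, IBertModel

# Default configuration, matching the kssteven/ibert-roberta-base architecture.
config = IBertConfig()

# Enable quantization and force the softmax back to full precision,
# per the `quant_mode` and `force_dequant` arguments documented above.
quant_config = IBertConfig(quant_mode=True, force_dequant="softmax")

# A randomly initialized model built from a configuration.
model = IBertModel(config)
print(config.vocab_size, quant_config.force_dequant)
```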

id: 3,001
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/configuration_ibert.py
class_name: transformers.models.ibert.configuration_ibert.IBertOnnxConfig
human_written_code:
from ...onnx import OnnxConfig
from collections.abc import Mapping
from collections import OrderedDict
class IBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

class_skeleton:
class IBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=0, AvgCountLineCode=11, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=13, CountLineBlank=0, CountLineCode=13, CountLineCodeDecl=4, CountLineCodeExe=10, CountLineComment=0, CountStmt=6, CountStmtDecl=3, CountStmtExe=4, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=2
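
For illustration, the dynamic-axes logic of `IBertOnnxConfig.inputs` can be reproduced standalone; `dynamic_axes` below is a hypothetical helper, not part of the transformers API:

```python
from collections import OrderedDict

def dynamic_axes(task: str = "default") -> OrderedDict:
    # Multiple-choice inputs carry an extra `choice` dimension at axis 1.
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    # Both exported inputs share the same dynamic axes.
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])

print(dynamic_axes())                   # batch/sequence axes
print(dynamic_axes("multiple-choice"))  # batch/choice/sequence axes
```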

id: 3,002
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertAttention
human_written_code:
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class IBertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.self = IBertSelfAttention(config)
self.output = IBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False):
self_outputs, self_outputs_scaling_factor = self.self(hidden_states, hidden_states_scaling_factor, attention_mask, head_mask, output_attentions)
attention_output, attention_output_scaling_factor = self.output(self_outputs[0], self_outputs_scaling_factor[0], hidden_states, hidden_states_scaling_factor)
outputs = (attention_output,) + self_outputs[1:]
outputs_scaling_factor = (attention_output_scaling_factor,) + self_outputs_scaling_factor[1:]
return (outputs, outputs_scaling_factor)

class_skeleton:
class IBertAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False):
pass

metrics: total_program_units=4, total_doc_str=0, AvgCountLine=15, AvgCountLineBlank=1, AvgCountLineCode=13, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=4, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=47, CountLineBlank=4, CountLineCode=41, CountLineCodeDecl=20, CountLineCodeExe=30, CountLineComment=3, CountStmt=24, CountStmtDecl=13, CountStmtExe=20, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4

id: 3,003
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertClassificationHead
human_written_code:
from torch import nn
import torch
class IBertClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
hidden_states = features[:, 0, :]
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states

class_skeleton:
class IBertClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=17, CountLineBlank=2, CountLineCode=14, CountLineCodeDecl=7, CountLineCodeExe=11, CountLineComment=2, CountStmt=14, CountStmtDecl=7, CountStmtExe=11, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
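
The head classifies from the first (`<s>`) token only. A standalone sketch of that slicing with plain tensors (shapes are illustrative, not from the dataset):

```python
import torch
from torch import nn

batch, seq_len, hidden = 2, 8, 16
features = torch.randn(batch, seq_len, hidden)

# Equivalent of `features[:, 0, :]` in IBertClassificationHead.forward:
# keep only the first token's hidden state as the sentence representation.
first_token = features[:, 0, :]
print(first_token.shape)  # torch.Size([2, 16])

# The rest of the head is dense -> tanh -> projection (dropout omitted here).
dense, out_proj = nn.Linear(hidden, hidden), nn.Linear(hidden, 3)
logits = out_proj(torch.tanh(dense(first_token)))
print(logits.shape)  # torch.Size([2, 3])
```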

id: 3,004
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertEmbeddings
human_written_code:
from torch import nn
import torch
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
class IBertEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.embedding_bit = 8
self.embedding_act_bit = 16
self.act_bit = 8
self.ln_input_bit = 22
self.ln_output_bit = 32
self.word_embeddings = QuantEmbedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id, weight_bit=self.embedding_bit, quant_mode=self.quant_mode)
self.token_type_embeddings = QuantEmbedding(config.type_vocab_size, config.hidden_size, weight_bit=self.embedding_bit, quant_mode=self.quant_mode)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.padding_idx = config.pad_token_id
self.position_embeddings = QuantEmbedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx, weight_bit=self.embedding_bit, quant_mode=self.quant_mode)
self.embeddings_act1 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode)
self.embeddings_act2 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode)
self.LayerNorm = IntLayerNorm(config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
if position_ids is None:
if input_ids is not None:
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds, inputs_embeds_scaling_factor = self.word_embeddings(input_ids)
else:
inputs_embeds_scaling_factor = None
token_type_embeddings, token_type_embeddings_scaling_factor = self.token_type_embeddings(token_type_ids)
embeddings, embeddings_scaling_factor = self.embeddings_act1(inputs_embeds, inputs_embeds_scaling_factor, identity=token_type_embeddings, identity_scaling_factor=token_type_embeddings_scaling_factor)
if self.position_embedding_type == 'absolute':
position_embeddings, position_embeddings_scaling_factor = self.position_embeddings(position_ids)
embeddings, embeddings_scaling_factor = self.embeddings_act1(embeddings, embeddings_scaling_factor, identity=position_embeddings, identity_scaling_factor=position_embeddings_scaling_factor)
embeddings, embeddings_scaling_factor = self.LayerNorm(embeddings, embeddings_scaling_factor)
embeddings = self.dropout(embeddings)
embeddings, embeddings_scaling_factor = self.output_activation(embeddings, embeddings_scaling_factor)
return (embeddings, embeddings_scaling_factor)
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)

class_skeleton:
class IBertEmbeddings(nn.Module):
'''
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
'''
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
pass
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass

metrics: total_program_units=4, total_doc_str=2, AvgCountLine=38, AvgCountLineBlank=5, AvgCountLineCode=29, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.17, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=16, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=120, CountLineBlank=17, CountLineCode=88, CountLineCodeDecl=30, CountLineCodeExe=82, CountLineComment=15, CountStmt=47, CountStmtDecl=28, CountStmtExe=43, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=9
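
`create_position_ids_from_inputs_embeds` numbers positions starting at `padding_idx + 1`, so the padding index is never used as a real position. A standalone check of that offset, with hypothetical shapes:

```python
import torch

padding_idx = 1
inputs_embeds = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)

input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]

# Positions run from padding_idx + 1 through seq_len + padding_idx,
# mirroring the arange in the method above.
position_ids = torch.arange(
    padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long
)
print(position_ids.unsqueeze(0).expand(input_shape))
# tensor([[2, 3, 4, 5, 6],
#         [2, 3, 4, 5, 6]])
```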

id: 3,005
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertEncoder
human_written_code:
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
class IBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.quant_mode = config.quant_mode
self.layer = nn.ModuleList([IBertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, hidden_states_scaling_factor, attention_mask, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)

class_skeleton:
class IBertEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=30, AvgCountLineBlank=3, AvgCountLineCode=27, AvgCountLineComment=1, AvgCyclomatic=5, CommentToCodeRatio=0.04, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=62, CountLineBlank=7, CountLineCode=55, CountLineCodeDecl=22, CountLineCodeExe=43, CountLineComment=2, CountStmt=24, CountStmtDecl=13, CountStmtExe=21, MaxCyclomatic=9, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=10

id: 3,006
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertForMaskedLM
human_written_code:
import torch
from ...utils import auto_docstring, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
@auto_docstring
class IBertForMaskedLM(IBertPreTrainedModel):
_tied_weights_keys = ['lm_head.decoder.bias', 'lm_head.decoder.weight']
def __init__(self, config):
super().__init__(config)
self.ibert = IBertModel(config, add_pooling_layer=False)
self.lm_head = IBertLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class IBertForMaskedLM(IBertPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, tuple[torch.FloatTensor]]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
'''
pass

metrics: total_program_units=7, total_doc_str=1, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=2, CountDeclMethod=4, CountDeclMethodAll=6, CountLine=78, CountLineBlank=10, CountLineCode=59, CountLineCodeDecl=27, CountLineCodeExe=35, CountLineComment=9, CountStmt=25, CountStmtDecl=14, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
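
The `-100` convention from the `labels` docstring is simply `CrossEntropyLoss`'s default `ignore_index`. A standalone sketch with toy logits:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(2, 4, vocab_size)  # (batch, seq_len, vocab)
labels = torch.tensor([[3, -100, 7, -100],
                       [-100, 1, -100, 2]])         # -100 = unmasked, ignored

loss_fct = CrossEntropyLoss()  # ignore_index defaults to -100
masked_lm_loss = loss_fct(
    prediction_scores.view(-1, vocab_size), labels.view(-1)
)
print(masked_lm_loss)  # averaged over the four labeled positions only
```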

id: 3,007
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertForMultipleChoice
human_written_code:
import torch
from ...utils import auto_docstring, logging
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
@auto_docstring
class IBertForMultipleChoice(IBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.ibert = IBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, tuple[torch.FloatTensor]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.ibert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class IBertForMultipleChoice(IBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, tuple[torch.FloatTensor]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
'''
pass

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=37, AvgCountLineBlank=4, AvgCountLineCode=29, AvgCountLineComment=4, AvgCyclomatic=6, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=4, CountLine=81, CountLineBlank=9, CountLineCode=65, CountLineCodeDecl=32, CountLineCodeExe=44, CountLineComment=7, CountStmt=28, CountStmtDecl=19, CountStmtExe=25, MaxCyclomatic=11, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=12
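
The forward pass flattens `(batch, num_choices, seq_len)` inputs so the encoder sees ordinary 2-D batches, then reshapes the per-row logits back to one score per choice. A standalone sketch of that round trip:

```python
import torch

batch, num_choices, seq_len = 2, 4, 6
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

# Flatten choices into the batch dimension, as in forward().
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
print(flat_input_ids.shape)  # torch.Size([8, 6])

# After the encoder + classifier emit one logit per flattened row,
# reshape back so each example has one score per choice.
logits = torch.randn(batch * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices)
print(reshaped_logits.shape)  # torch.Size([2, 4])
```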

id: 3,008
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertForQuestionAnswering
human_written_code:
from typing import Optional, Union
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from torch import nn
@auto_docstring
class IBertForQuestionAnswering(IBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ibert = IBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, tuple[torch.FloatTensor]]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class IBertForQuestionAnswering(IBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, tuple[torch.FloatTensor]]:
pass

metrics: total_program_units=5, total_doc_str=0, AvgCountLine=41, AvgCountLineBlank=5, AvgCountLineCode=30, AvgCountLineComment=7, AvgCyclomatic=4, CommentToCodeRatio=0.19, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=4, CountLine=90, CountLineBlank=10, CountLineCode=67, CountLineCodeDecl=30, CountLineCodeExe=45, CountLineComment=13, CountStmt=32, CountStmtDecl=16, CountStmtExe=29, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=8
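
The QA head emits two logits per token, split into start/end scores; out-of-range answer positions are clamped to `ignored_index` so they contribute no loss. A standalone sketch with toy shapes:

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len = 2, 7
logits = torch.randn(batch, seq_len, 2)  # qa_outputs: 2 logits per token

start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()  # (batch, seq_len)
end_logits = end_logits.squeeze(-1).contiguous()

# Positions outside the sequence are clamped to seq_len and then ignored.
ignored_index = start_logits.size(1)
start_positions = torch.tensor([3, 99]).clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
print(loss_fct(start_logits, start_positions))  # only the first example counts
```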

id: 3,009
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertForSequenceClassification
human_written_code:
import torch
from typing import Optional, Union
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring(custom_intro='\n I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class IBertForSequenceClassification(IBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ibert = IBertModel(config, add_pooling_layer=False)
self.classifier = IBertClassificationHead(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring(custom_intro='\n I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class IBertForSequenceClassification(IBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, tuple[torch.FloatTensor]]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=38, AvgCountLineBlank=3, AvgCountLineCode=32, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=4, CountLine=84, CountLineBlank=7, CountLineCode=70, CountLineCodeDecl=25, CountLineCodeExe=49, CountLineComment=7, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=13
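
The loss selection above keys off `config.problem_type`, inferred from `num_labels` and the label dtype when unset. A standalone sketch of that inference rule; `infer_problem_type` is a hypothetical helper mirroring the branch in `forward`:

```python
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    if num_labels == 1:
        return "regression"                   # handled with MSELoss
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"  # handled with CrossEntropyLoss
    return "multi_label_classification"       # handled with BCEWithLogitsLoss

print(infer_problem_type(1, torch.tensor([0.5])))            # regression
print(infer_problem_type(3, torch.tensor([2])))              # single_label_classification
print(infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])))  # multi_label_classification
```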

id: 3,010
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertForTokenClassification
human_written_code:
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, logging
from torch import nn
import torch
from typing import Optional, Union
@auto_docstring
class IBertForTokenClassification(IBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ibert = IBertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, tuple[torch.FloatTensor]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class IBertForTokenClassification(IBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, tuple[torch.FloatTensor]]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=31, AvgCountLineBlank=4, AvgCountLineCode=24, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.09, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=4, CountLine=69, CountLineBlank=9, CountLineCode=55, CountLineCodeDecl=26, CountLineCodeExe=34, CountLineComment=5, CountStmt=22, CountStmtDecl=13, CountStmtExe=19, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=6

id: 3,011
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertIntermediate
human_written_code:
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
from torch import nn
class IBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.act_bit = 8
self.weight_bit = 8
self.bias_bit = 32
self.dense = QuantLinear(config.hidden_size, config.intermediate_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True)
if config.hidden_act != 'gelu':
raise ValueError("I-BERT only supports 'gelu' for `config.hidden_act`")
self.intermediate_act_fn = IntGELU(quant_mode=self.quant_mode, force_dequant=config.force_dequant)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
def forward(self, hidden_states, hidden_states_scaling_factor):
hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.intermediate_act_fn(hidden_states, hidden_states_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.output_activation(hidden_states, hidden_states_scaling_factor)
return (hidden_states, hidden_states_scaling_factor)

class_skeleton:
class IBertIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, hidden_states_scaling_factor):
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=15, AvgCountLineBlank=1, AvgCountLineCode=14, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.03, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=32, CountLineBlank=2, CountLineCode=29, CountLineCodeDecl=10, CountLineCodeExe=26, CountLineComment=1, CountStmt=17, CountStmtDecl=10, CountStmtExe=14, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3

id: 3,012
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertLMHead
human_written_code:
import torch
from ...activations import gelu
from torch import nn
class IBertLMHead(nn.Module):
"""I-BERT Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
x = self.decoder(x)
return x
def _tie_weights(self) -> None:
if self.decoder.bias.device.type == 'meta':
self.decoder.bias = self.bias
else:
self.bias = self.decoder.bias

class_skeleton:
class IBertLMHead(nn.Module):
'''I-BERT Head for masked language modeling.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
def _tie_weights(self) -> None:
pass

metrics: total_program_units=4, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=6, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.21, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=4, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=29, CountLineBlank=6, CountLineCode=19, CountLineCodeDecl=9, CountLineCodeExe=15, CountLineComment=4, CountStmt=18, CountStmtDecl=9, CountStmtExe=14, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4

id: 3,013
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertLayer
human_written_code:
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
from torch import nn
class IBertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.act_bit = 8
self.seq_len_dim = 1
self.attention = IBertAttention(config)
self.intermediate = IBertIntermediate(config)
self.output = IBertOutput(config)
self.pre_intermediate_act = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.pre_output_act = QuantAct(self.act_bit, quant_mode=self.quant_mode)
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False):
self_attention_outputs, self_attention_outputs_scaling_factor = self.attention(hidden_states, hidden_states_scaling_factor, attention_mask, head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
attention_output_scaling_factor = self_attention_outputs_scaling_factor[0]
outputs = self_attention_outputs[1:]
layer_output, layer_output_scaling_factor = self.feed_forward_chunk(attention_output, attention_output_scaling_factor)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output, attention_output_scaling_factor):
attention_output, attention_output_scaling_factor = self.pre_intermediate_act(attention_output, attention_output_scaling_factor)
intermediate_output, intermediate_output_scaling_factor = self.intermediate(attention_output, attention_output_scaling_factor)
intermediate_output, intermediate_output_scaling_factor = self.pre_output_act(intermediate_output, intermediate_output_scaling_factor)
layer_output, layer_output_scaling_factor = self.output(intermediate_output, intermediate_output_scaling_factor, attention_output, attention_output_scaling_factor)
return (layer_output, layer_output_scaling_factor)

class_skeleton:
class IBertLayer(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False):
pass
def feed_forward_chunk(self, attention_output, attention_output_scaling_factor):
pass

metrics: total_program_units=4, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=16, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.02, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=8, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=56, CountLineBlank=8, CountLineCode=48, CountLineCodeDecl=26, CountLineCodeExe=37, CountLineComment=1, CountStmt=25, CountStmtDecl=19, CountStmtExe=21, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3

id: 3,014
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertModel
human_written_code:
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from typing import Optional, Union
@auto_docstring
class IBertModel(IBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
"""
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.quant_mode = config.quant_mode
self.embeddings = IBertEmbeddings(config)
self.encoder = IBertEncoder(config)
self.pooler = IBertPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, tuple[torch.FloatTensor]]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output, embedding_output_scaling_factor = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, embedding_output_scaling_factor, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)

class_skeleton:
@auto_docstring
class IBertModel(IBertPreTrainedModel):
'''
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
'''
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, tuple[torch.FloatTensor]]:
pass

metrics: total_program_units=8, total_doc_str=3, AvgCountLine=20, AvgCountLineBlank=2, AvgCountLineCode=15, AvgCountLineComment=2, AvgCyclomatic=4, CommentToCodeRatio=0.22, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=5, CountDeclMethod=5, CountDeclMethodAll=7, CountLine=119, CountLineBlank=18, CountLineCode=83, CountLineCodeDecl=32, CountLineCodeExe=60, CountLineComment=18, CountStmt=41, CountStmtDecl=20, CountStmtExe=35, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=18
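
A minimal end-to-end sketch of using the full model, assuming network access to the `kssteven/ibert-roberta-base` checkpoint named in the configuration docstring:

```python
import torch
from transformers import AutoTokenizer, IBertModel

tokenizer = AutoTokenizer.from_pretrained("kssteven/ibert-roberta-base")
model = IBertModel.from_pretrained("kssteven/ibert-roberta-base")

inputs = tokenizer("Hello, I-BERT!", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# (batch, seq_len, hidden_size) last hidden states, plus the pooled output
# produced because add_pooling_layer defaults to True.
print(outputs.last_hidden_state.shape)
print(outputs.pooler_output.shape)
```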

id: 3,015
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
class_name: transformers.models.ibert.modeling_ibert.IBertOutput
human_written_code:
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
from torch import nn
class IBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.act_bit = 8
self.weight_bit = 8
self.bias_bit = 32
self.ln_input_bit = 22
self.ln_output_bit = 32
self.dense = QuantLinear(config.intermediate_size, config.hidden_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True)
self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode)
self.LayerNorm = IntLayerNorm(config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
hidden_states = self.dropout(hidden_states)
hidden_states, hidden_states_scaling_factor = self.ln_input_act(hidden_states, hidden_states_scaling_factor, identity=input_tensor, identity_scaling_factor=input_tensor_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.output_activation(hidden_states, hidden_states_scaling_factor)
return (hidden_states, hidden_states_scaling_factor)
|
class IBertOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
pass
| 3
| 0
| 22
| 1
| 21
| 0
| 1
| 0
| 1
| 4
| 3
| 0
| 2
| 11
| 2
| 12
| 45
| 3
| 42
| 14
| 39
| 0
| 21
| 14
| 18
| 1
| 1
| 0
| 2
|
3,016
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
|
transformers.models.ibert.modeling_ibert.IBertPooler
|
from torch import nn
class IBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class IBertPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.18
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 14
| 1
| 11
| 8
| 8
| 2
| 11
| 8
| 8
| 1
| 1
| 0
| 2
|
3,017
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
|
transformers.models.ibert.modeling_ibert.IBertPreTrainedModel
|
from ...utils import auto_docstring, logging
from .configuration_ibert import IBertConfig
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
from ...modeling_utils import PreTrainedModel
from torch import nn
@auto_docstring
class IBertPreTrainedModel(PreTrainedModel):
config: IBertConfig
base_model_prefix = 'ibert'
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (QuantLinear, nn.Linear)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (QuantEmbedding, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (IntLayerNorm, nn.LayerNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, IBertLMHead):
module.bias.data.zero_()
def resize_token_embeddings(self, new_num_tokens=None):
raise NotImplementedError('`resize_token_embeddings` is not supported for I-BERT.')
|
@auto_docstring
class IBertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
def resize_token_embeddings(self, new_num_tokens=None):
pass
| 4
| 1
| 9
| 0
| 7
| 2
| 4
| 0.41
| 1
| 4
| 3
| 6
| 2
| 0
| 2
| 2
| 27
| 3
| 17
| 5
| 14
| 7
| 15
| 5
| 12
| 6
| 1
| 2
| 7
|
3,018
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
|
transformers.models.ibert.modeling_ibert.IBertSelfAttention
|
from torch import nn
import torch
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
import math
class IBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.quant_mode = config.quant_mode
self.weight_bit = 8
self.bias_bit = 32
self.act_bit = 8
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = QuantLinear(config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True)
self.key = QuantLinear(config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True)
self.value = QuantLinear(config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True)
self.query_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.key_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.value_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type != 'absolute':
raise ValueError("I-BERT only supports 'absolute' for `config.position_embedding_type`")
self.softmax = IntSoftmax(self.act_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant)
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False):
mixed_query_layer, mixed_query_layer_scaling_factor = self.query(hidden_states, hidden_states_scaling_factor)
mixed_key_layer, mixed_key_layer_scaling_factor = self.key(hidden_states, hidden_states_scaling_factor)
mixed_value_layer, mixed_value_layer_scaling_factor = self.value(hidden_states, hidden_states_scaling_factor)
query_layer, query_layer_scaling_factor = self.query_activation(mixed_query_layer, mixed_query_layer_scaling_factor)
key_layer, key_layer_scaling_factor = self.key_activation(mixed_key_layer, mixed_key_layer_scaling_factor)
value_layer, value_layer_scaling_factor = self.value_activation(mixed_value_layer, mixed_value_layer_scaling_factor)
batch_size, seq_length, _ = hidden_states.shape
query_layer = query_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = value_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
scale = math.sqrt(self.attention_head_size)
attention_scores = attention_scores / scale
if self.quant_mode:
attention_scores_scaling_factor = query_layer_scaling_factor * key_layer_scaling_factor / scale
else:
attention_scores_scaling_factor = None
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs, attention_probs_scaling_factor = self.softmax(attention_scores, attention_scores_scaling_factor)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
if attention_probs_scaling_factor is not None:
context_layer_scaling_factor = attention_probs_scaling_factor * value_layer_scaling_factor
else:
context_layer_scaling_factor = None
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
context_layer, context_layer_scaling_factor = self.output_activation(context_layer, context_layer_scaling_factor)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
output_scaling_factor = (context_layer_scaling_factor, attention_probs_scaling_factor) if output_attentions else (context_layer_scaling_factor,)
return (outputs, output_scaling_factor)
|
class IBertSelfAttention(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False):
pass
| 3
| 0
| 46
| 6
| 36
| 4
| 4
| 0.11
| 1
| 6
| 3
| 0
| 3
| 17
| 3
| 13
| 140
| 19
| 109
| 44
| 98
| 12
| 61
| 37
| 57
| 7
| 1
| 1
| 11
|
3,019
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/modeling_ibert.py
|
transformers.models.ibert.modeling_ibert.IBertSelfOutput
|
from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
from torch import nn
class IBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.act_bit = 8
self.weight_bit = 8
self.bias_bit = 32
self.ln_input_bit = 22
self.ln_output_bit = 32
self.dense = QuantLinear(config.hidden_size, config.hidden_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True)
self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode)
self.LayerNorm = IntLayerNorm(config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
hidden_states = self.dropout(hidden_states)
hidden_states, hidden_states_scaling_factor = self.ln_input_act(hidden_states, hidden_states_scaling_factor, identity=input_tensor, identity_scaling_factor=input_tensor_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.output_activation(hidden_states, hidden_states_scaling_factor)
return (hidden_states, hidden_states_scaling_factor)
|
class IBertSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
pass
| 3
| 0
| 22
| 1
| 21
| 0
| 1
| 0
| 1
| 4
| 3
| 0
| 2
| 11
| 2
| 12
| 45
| 3
| 42
| 14
| 39
| 0
| 21
| 14
| 18
| 1
| 1
| 0
| 2
|
3,020
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.FixedPointMul
|
import torch
from torch.autograd import Function
class FixedPointMul(Function):
"""
Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.
Args:
pre_act (`torch.Tensor`):
Input tensor.
pre_act_scaling_factor (`torch.Tensor`):
Scaling factor of the input tensor *pre_act*.
bit_num (`int`):
Quantization bitwidth.
z_scaling_factor (`torch.Tensor`):
Scaling factor of the output tensor.
identity (`torch.Tensor`, *optional*):
Identity tensor, if exists.
identity_scaling_factor (`torch.Tensor`, *optional*):
Scaling factor of the identity tensor *identity*, if exists.
Returns:
`torch.Tensor`: Output tensor (*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and
*identity*), whose scale is rescaled to *z_scaling_factor*.
"""
@staticmethod
def forward(ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None):
if len(pre_act_scaling_factor.shape) == 3:
def reshape(x):
return x
else:
def reshape(x):
return x.view(1, 1, -1)
ctx.identity = identity
n = 2 ** (bit_num - 1) - 1
with torch.no_grad():
pre_act_scaling_factor = reshape(pre_act_scaling_factor)
if identity is not None:
identity_scaling_factor = reshape(identity_scaling_factor)
ctx.z_scaling_factor = z_scaling_factor
z_int = torch.round(pre_act / pre_act_scaling_factor)
_A = pre_act_scaling_factor.type(torch.double)
_B = z_scaling_factor.type(torch.float).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m, e = batch_frexp(new_scale)
output = z_int.type(torch.double) * m.type(torch.double)
output = torch.round(output / 2.0 ** e)
if identity is not None:
wx_int = torch.round(identity / identity_scaling_factor)
_A = identity_scaling_factor.type(torch.double)
_B = z_scaling_factor.type(torch.float).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m1, e1 = batch_frexp(new_scale)
output1 = wx_int.type(torch.double) * m1.type(torch.double)
output1 = torch.round(output1 / 2.0 ** e1)
output = output1 + output
return torch.clamp(output.type(torch.float), -n - 1, n)
@staticmethod
def backward(ctx, grad_output):
identity_grad = None
if ctx.identity is not None:
identity_grad = grad_output.clone() / ctx.z_scaling_factor
return (grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None)
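To make the rescaling above concrete, here is a minimal standalone sketch (not part of the module; the scale value is an assumed example) of the dyadic trick that `batch_frexp` and `FixedPointMul.forward` implement: the floating-point rescaling factor is replaced by a 31-bit integer mantissa and a right shift.
```python
import numpy as np

# Hypothetical illustration: approximate new_scale = pre_act_scaling_factor / z_scaling_factor
# by a dyadic number m / 2**e, so that requantization becomes an integer multiply plus a shift.
new_scale = 0.00732                 # assumed example value of the rescaling factor
m, e = np.frexp(new_scale)          # new_scale = m * 2**e with 0.5 <= m < 1
m_int = np.round(m * 2 ** 31)       # 31-bit integer mantissa, as in batch_frexp
shift = 31 - e                      # total right shift applied after the multiply

z_int = 1000                        # an already-quantized integer value
approx = np.round(z_int * m_int / 2.0 ** shift)
print(approx, z_int * new_scale)    # the two results agree up to rounding
```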
|
class FixedPointMul(Function):
'''
Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.
Args:
pre_act (`torch.Tensor`):
Input tensor.
pre_act_scaling_factor (`torch.Tensor`):
Scaling factor of the input tensor *pre_act*.
bit_num (`int`):
Quantization bitwidth.
z_scaling_factor (`torch.Tensor`):
Scaling factor of the output tensor.
identity (`torch.Tensor`, *optional*):
Identity tensor, if exists.
identity_scaling_factor (`torch.Tensor`, *optional*):
Scaling factor of the identity tensor *identity*, if exists.
Returns:
`torch.Tensor`: Output tensor (*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and
*identity*), whose scale is rescaled to *z_scaling_factor*.
'''
@staticmethod
def forward(ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None):
pass
def reshape(x):
pass
def reshape(x):
pass
@staticmethod
def backward(ctx, grad_output):
pass
| 7
| 1
| 28
| 6
| 22
| 2
| 3
| 0.47
| 1
| 1
| 0
| 0
| 0
| 0
| 2
| 2
| 82
| 15
| 47
| 25
| 34
| 22
| 36
| 15
| 33
| 4
| 1
| 2
| 6
|
3,021
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.IntGELU
|
import torch
from torch import nn
class IntGELU(nn.Module):
"""
Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.
Args:
quant_mode (`bool`, *optional*, defaults to `True`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "gelu" or "nonlinear" is given.
"""
def __init__(self, quant_mode=True, force_dequant='none'):
super().__init__()
self.quant_mode = quant_mode
if force_dequant in ['nonlinear', 'gelu']:
logger.info('Force dequantize gelu')
self.quant_mode = False
if not self.quant_mode:
self.activation_fn = nn.GELU()
self.k = 1.4142
self.const = 14
self.coeff = [-0.2888, -1.769, 1]
self.coeff[2] /= self.coeff[0]
def int_erf(self, x_int, scaling_factor):
b_int = torch.floor(self.coeff[1] / scaling_factor)
c_int = torch.floor(self.coeff[2] / scaling_factor ** 2)
sign = torch.sign(x_int)
abs_int = torch.min(torch.abs(x_int), -b_int)
y_int = sign * ((abs_int + b_int) ** 2 + c_int)
scaling_factor = scaling_factor ** 2 * self.coeff[0]
y_int = floor_ste.apply(y_int / 2 ** self.const)
scaling_factor = scaling_factor * 2 ** self.const
return (y_int, scaling_factor)
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
return (self.activation_fn(x), None)
x_int = x / scaling_factor
sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
shift_int = 1.0 // sigmoid_scaling_factor
x_int = x_int * (sigmoid_int + shift_int)
scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
return (x_int * scaling_factor, scaling_factor)
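A rough usage sketch, assuming the classes are importable from `transformers.models.ibert.quant_modules` as in the imports above: quantize an activation with `QuantAct`, run the integer-only GELU, and compare against the floating-point reference (the exact error depends on the sampled inputs).
```python
import torch
from transformers.models.ibert.quant_modules import IntGELU, QuantAct

quant_act = QuantAct(8, quant_mode=True)   # 8-bit activation quantizer
int_gelu = IntGELU(quant_mode=True)        # polynomial (integer-only) GELU

x = torch.randn(2, 8)
x_q, scale = quant_act(x)                  # fake-quantized input and its scaling factor
y_q, _ = int_gelu(x_q, scale)              # i-GELU approximation
print((y_q - torch.nn.functional.gelu(x)).abs().max())  # approximation error vs. float GELU
```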
|
class IntGELU(nn.Module):
'''
Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.
Args:
quant_mode (`bool`, *optional*, defaults to `True`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "gelu" or "nonlinear" is given.
'''
def __init__(self, quant_mode=True, force_dequant='none'):
pass
def int_erf(self, x_int, scaling_factor):
pass
def forward(self, x, scaling_factor=None):
pass
| 4
| 1
| 14
| 3
| 10
| 1
| 2
| 0.34
| 1
| 2
| 1
| 0
| 3
| 5
| 3
| 13
| 55
| 14
| 32
| 17
| 28
| 11
| 32
| 17
| 28
| 3
| 1
| 1
| 6
|
3,022
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.IntLayerNorm
|
import torch
from torch import nn
class IntLayerNorm(nn.Module):
"""
Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.
Args:
output_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "layernorm" or "nonlinear" is given.
"""
def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'):
super().__init__()
self.normalized_shape = normalized_shape
self.eps = eps
self.weight = nn.Parameter(torch.zeros(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.quant_mode = quant_mode
if force_dequant in ['nonlinear', 'layernorm']:
logger.info('Force dequantize layernorm')
self.quant_mode = False
self.register_buffer('shift', torch.zeros(1))
self.output_bit = output_bit
self.max_bit = 32
self.dim_sqrt = None
self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
def set_shift(self, y_int):
with torch.no_grad():
y_sq_int = y_int ** 2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
shift = torch.log2(torch.sqrt(var_int / 2 ** self.max_bit)).ceil().max()
shift_old = self.shift
self.shift = torch.max(self.shift, shift)
logger.info(f'Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}')
def overflow_fallback(self, y_int):
"""
This fallback function is called when overflow is detected during training time, and adjusts the `self.shift`
to avoid overflow in the subsequent runs.
"""
self.set_shift(y_int)
y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift)
y_sq_int = y_int_shifted ** 2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
return var_int
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
mean = x.mean(axis=2, keepdim=True)
y = x - mean
var = torch.mean(y ** 2, axis=2, keepdim=True)
x = y / torch.sqrt(self.eps + var)
x = x * self.weight + self.bias
return (x, None)
if self.dim_sqrt is None:
n = torch.tensor(x.shape[2], dtype=torch.float)
self.dim_sqrt = torch.sqrt(n).to(x.device)
x_int = x / scaling_factor
mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
y_int = x_int - mean_int
y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift)
y_sq_int = y_int_shifted ** 2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
if self.training:
if var_int.max() >= 2 ** self.max_bit:
var_int = self.overflow_fallback(y_int)
assert var_int.max() < 2 ** self.max_bit + 0.1, 'Error detected in overflow handling: `var_int` exceeds `self.max_bit` (the maximum possible bit width)'
std_int = floor_ste.apply(torch.sqrt(var_int)) * 2 ** self.shift
factor = floor_ste.apply(2 ** 31 / std_int)
y_int = floor_ste.apply(y_int * factor / 2)
scaling_factor = self.dim_sqrt / 2 ** 30
bias = self.bias.data.detach() / self.weight.data.detach()
bias_int = floor_ste.apply(bias / scaling_factor)
y_int = y_int + bias_int
scaling_factor = scaling_factor * self.weight
x = y_int * scaling_factor
return (x, scaling_factor)
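A back-of-the-envelope sketch (standalone, with assumed activation magnitudes) of why the `shift` buffer exists: the sum of squared centred integers over the hidden dimension can overflow 32 bits, so the integers are right-shifted before squaring, mirroring what `set_shift` computes (the `clamp(min=0)` is added here only for the sketch).
```python
import torch

y_int = torch.randint(-20000, 20000, (1, 1, 768)).float()    # assumed centred integer activations
var_int = (y_int ** 2).sum(dim=2, keepdim=True)               # can exceed 2**32
shift = torch.log2(torch.sqrt(var_int / 2 ** 32)).ceil().clamp(min=0)
var_shifted = (torch.floor(y_int / 2 ** shift) ** 2).sum(dim=2, keepdim=True)
print(int(var_int.max()), int(shift.max()), bool((var_shifted < 2 ** 32).all()))  # True after shifting
```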
|
class IntLayerNorm(nn.Module):
'''
Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.
Args:
output_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "layernorm" or "nonlinear" is given.
'''
def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'):
pass
def set_shift(self, y_int):
pass
def overflow_fallback(self, y_int):
'''
This fallback function is called when overflow is detected during training time, and adjusts the `self.shift`
to avoid overflow in the subsequent runs.
'''
pass
def forward(self, x, scaling_factor=None):
pass
| 5
| 2
| 21
| 3
| 16
| 3
| 2
| 0.33
| 1
| 6
| 3
| 0
| 4
| 10
| 4
| 14
| 99
| 15
| 64
| 36
| 59
| 21
| 61
| 36
| 56
| 5
| 1
| 2
| 9
|
3,023
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.IntSoftmax
|
import torch
from torch import nn
class IntSoftmax(nn.Module):
"""
Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`.
Args:
output_bit (`int`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "softmax" or "nonlinear" is given.
"""
def __init__(self, output_bit, quant_mode=False, force_dequant='none'):
super().__init__()
self.output_bit = output_bit
self.max_bit = 32
self.quant_mode = quant_mode
if force_dequant in ['nonlinear', 'softmax']:
logger.info('Force dequantize softmax')
self.quant_mode = False
self.act = QuantAct(16, quant_mode=self.quant_mode)
self.x0 = -0.6931
self.const = 30
self.coef = [0.35815147, 0.96963238, 1.0]
self.coef[1] /= self.coef[0]
self.coef[2] /= self.coef[0]
def int_polynomial(self, x_int, scaling_factor):
with torch.no_grad():
b_int = torch.floor(self.coef[1] / scaling_factor)
c_int = torch.floor(self.coef[2] / scaling_factor ** 2)
z = (x_int + b_int) * x_int + c_int
scaling_factor = self.coef[0] * scaling_factor ** 2
return (z, scaling_factor)
def int_exp(self, x_int, scaling_factor):
with torch.no_grad():
x0_int = torch.floor(self.x0 / scaling_factor)
x_int = torch.max(x_int, self.const * x0_int)
q = floor_ste.apply(x_int / x0_int)
r = x_int - x0_int * q
exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
scaling_factor = exp_scaling_factor / 2 ** self.const
return (exp_int, scaling_factor)
def forward(self, x, scaling_factor):
if not self.quant_mode:
return (nn.functional.softmax(x, dim=-1), None)
x_int = x / scaling_factor
x_int_max, _ = x_int.max(dim=-1, keepdim=True)
x_int = x_int - x_int_max
exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
exp_int = exp / exp_scaling_factor
exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
factor = floor_ste.apply(2 ** self.max_bit / exp_int_sum)
exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
scaling_factor = 1 / 2 ** self.output_bit
return (exp_int * scaling_factor, scaling_factor)
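`int_exp` relies on a simple range decomposition; a small floating-point sketch (with an assumed input value) of the identity it exploits:
```python
import math

# Writing x = q * (-ln 2) + r with r in (-ln 2, 0] gives exp(x) = exp(r) * 2**(-q),
# so only the bounded interval of r needs the second-order polynomial approximation.
x = -3.7                    # assumed input; softmax inputs are <= 0 after max-subtraction
x0 = -math.log(2)
q = math.floor(x / x0)
r = x - q * x0
print(math.exp(x), math.exp(r) * 2.0 ** (-q))   # the two values coincide
```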
|
class IntSoftmax(nn.Module):
'''
Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`.
Args:
output_bit (`int`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "softmax" or "nonlinear" is given.
'''
def __init__(self, output_bit, quant_mode=False, force_dequant='none'):
pass
def int_polynomial(self, x_int, scaling_factor):
pass
def int_exp(self, x_int, scaling_factor):
pass
def forward(self, x, scaling_factor):
pass
| 5
| 1
| 13
| 2
| 11
| 1
| 2
| 0.3
| 1
| 3
| 2
| 0
| 4
| 7
| 4
| 14
| 69
| 12
| 46
| 25
| 41
| 14
| 46
| 25
| 41
| 2
| 1
| 1
| 6
|
3,024
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.QuantAct
|
from torch import nn
import torch
class QuantAct(nn.Module):
"""
Quantizes the given activation.
Args:
activation_bit (`int`):
Bitwidth for the quantized activation.
act_range_momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
channel_len (`int`, *optional*):
Specify the channel length when setting *per_channel* to `True`.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
super().__init__()
self.activation_bit = activation_bit
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
self.per_channel = per_channel
self.percentile = False
self.act_function = SymmetricQuantFunction.apply
if not self.per_channel:
self.register_buffer('x_min', torch.zeros(1))
self.register_buffer('x_max', torch.zeros(1))
self.register_buffer('act_scaling_factor', torch.zeros(1))
self.x_min -= 1e-05
self.x_max += 1e-05
else:
raise NotImplementedError('per-channel mode is not currently supported for activation.')
def __repr__(self):
return f'{self.__class__.__name__}(activation_bit={self.activation_bit}, quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, Act_max: {self.x_max.item():.2f})'
def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None):
x_act = x if identity is None else identity + x
if self.training:
assert not self.percentile, 'percentile mode is not currently supported for activation.'
assert not self.per_channel, 'per-channel mode is not currently supported for activation.'
x_min = x_act.data.min()
x_max = x_act.data.max()
assert x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0, 'NaN detected when computing min/max of the activation'
if self.x_min.min() > -1.1e-05 and self.x_max.max() < 1.1e-05:
self.x_min = self.x_min + x_min
self.x_max = self.x_max + x_max
elif self.act_range_momentum == -1:
self.x_min = torch.min(self.x_min, x_min)
self.x_max = torch.max(self.x_max, x_max)
else:
self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
if not self.quant_mode:
return (x_act, None)
x_min = self.x_min if specified_min is None else specified_min
x_max = self.x_max if specified_max is None else specified_max
self.act_scaling_factor = symmetric_linear_quantization_params(self.activation_bit, x_min, x_max, per_channel=self.per_channel)
if pre_act_scaling_factor is None:
quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)
else:
quant_act_int = FixedPointMul.apply(x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor)
correct_output_scale = self.act_scaling_factor.view(-1)
return (quant_act_int * correct_output_scale, self.act_scaling_factor)
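A minimal standalone sketch of the running-range logic above (it simplifies the first-batch handling and assumes `symmetric_linear_quantization_params` behaves like the `scale` line below): an exponential moving average of the per-batch min/max yields a symmetric 8-bit scaling factor.
```python
import torch

momentum, n_bits = 0.95, 8
x_min, x_max = torch.zeros(1), torch.zeros(1)

for _ in range(10):                                   # simulate a few training batches
    x = torch.randn(4, 16)
    x_min = x_min * momentum + x.min() * (1 - momentum)
    x_max = x_max * momentum + x.max() * (1 - momentum)

n = 2 ** (n_bits - 1) - 1
scale = torch.max(x_min.abs(), x_max.abs()) / n               # symmetric scaling factor
x_q = torch.clamp(torch.round(x / scale), -n - 1, n) * scale  # fake-quantized last batch
print(scale, x_q.unique().numel())
```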
|
class QuantAct(nn.Module):
'''
Quantizes the given activation.
Args:
activation_bit (`int`):
Bitwidth for the quantized activation.
act_range_momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
channel_len (`int`, *optional*):
Specify the channel length when setting *per_channel* to `True`.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
'''
def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
pass
def __repr__(self):
pass
def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None):
pass
| 4
| 1
| 28
| 4
| 23
| 2
| 4
| 0.27
| 1
| 4
| 2
| 0
| 3
| 9
| 3
| 13
| 104
| 15
| 70
| 26
| 58
| 19
| 43
| 18
| 39
| 9
| 1
| 2
| 12
|
3,025
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.QuantEmbedding
|
import torch
from torch import nn
class QuantEmbedding(nn.Module):
"""
Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer('weight_scaling_factor', torch.zeros(1))
self.register_buffer('weight_integer', torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if not self.quant_mode:
return (nn.functional.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse), None)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor)
emb_int = nn.functional.embedding(x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
return (emb_int * self.weight_scaling_factor, self.weight_scaling_factor)
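Rough usage sketch, assuming the class is importable as in the imports above; the weight parameter is created as zeros, so it is randomized here purely for illustration.
```python
import torch
from transformers.models.ibert.quant_modules import QuantEmbedding

emb = QuantEmbedding(100, 16, padding_idx=0, weight_bit=8, quant_mode=True)
emb.weight.data.normal_()          # the parameter is created as zeros above; randomized for the demo
ids = torch.tensor([[1, 2, 3]])
out, scale = emb(ids)              # out == integer codes * scale
print(out.shape, scale)
```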
|
class QuantEmbedding(nn.Module):
'''
Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
'''
def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False):
pass
def forward(self, x, positions=None, incremental_state=None):
pass
| 3
| 1
| 34
| 3
| 31
| 0
| 2
| 0.16
| 1
| 2
| 1
| 0
| 2
| 15
| 2
| 12
| 81
| 8
| 63
| 36
| 47
| 10
| 29
| 23
| 26
| 2
| 1
| 1
| 3
|
3,026
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.QuantLinear
|
from torch import nn
import torch
class QuantLinear(nn.Module):
"""
Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
bias_bit (`int`, *optional*, defaults to `32`):
Bitwidth for the quantized bias.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
self.register_buffer('weight_integer', torch.zeros_like(self.weight))
self.register_buffer('fc_scaling_factor', torch.zeros(self.out_features))
if bias:
self.bias = nn.Parameter(torch.zeros(out_features))
self.register_buffer('bias_integer', torch.zeros_like(self.bias))
self.weight_bit = weight_bit
self.quant_mode = quant_mode
self.per_channel = per_channel
self.bias_bit = bias_bit
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def __repr__(self):
s = super().__repr__()
s = f'({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})'
return s
def forward(self, x, prev_act_scaling_factor=None):
if not self.quant_mode:
return (nn.functional.linear(x, weight=self.weight, bias=self.bias), None)
assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), 'Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. Please add a QuantAct layer with `per_channel = True` before this QuantAct layer'
w = self.weight
w_transform = w.data.detach()
if self.per_channel:
w_min, _ = torch.min(w_transform, dim=1, out=None)
w_max, _ = torch.max(w_transform, dim=1, out=None)
else:
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel)
self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor)
bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
if self.bias is not None:
self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor)
prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
x_int = x / prev_act_scaling_factor
return (nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor, bias_scaling_factor)
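A minimal standalone sketch of the per-channel branch above: each output channel (row of the weight matrix) gets its own symmetric 8-bit scale, which keeps the per-row rounding error bounded by half that scale.
```python
import torch

weight = torch.randn(4, 16)                                   # [out_features, in_features]
w_min, w_max = weight.min(dim=1).values, weight.max(dim=1).values
n = 2 ** 7 - 1
scale = torch.max(w_min.abs(), w_max.abs()) / n               # one scale per output channel
w_int = torch.clamp(torch.round(weight / scale[:, None]), -n - 1, n)
w_deq = w_int * scale[:, None]                                # dequantized weights
print(bool((weight - w_deq).abs().max() <= scale.max() / 2 + 1e-6))  # rounding error bound holds
```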
|
class QuantLinear(nn.Module):
'''
Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
bias_bit (`int`, *optional*, defaults to `32`):
Bitwidth for the quantized bias.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
'''
def __init__(self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False):
pass
def __repr__(self):
pass
def forward(self, x, prev_act_scaling_factor=None):
pass
| 4
| 1
| 20
| 3
| 17
| 0
| 2
| 0.25
| 1
| 2
| 1
| 0
| 3
| 13
| 3
| 13
| 78
| 13
| 52
| 26
| 46
| 13
| 41
| 24
| 37
| 4
| 1
| 1
| 7
|
3,027
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.SymmetricQuantFunction
|
import torch
from torch.autograd import Function
class SymmetricQuantFunction(Function):
"""
Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth.
"""
@staticmethod
def forward(ctx, x, k, percentile_mode, scale):
"""
Args:
x (`torch.Tensor`):
Floating point tensor to be quantized.
k (`int`):
Quantization bitwidth.
percentile_mode (`bool`):
Whether or not to use percentile calibration.
scale (`torch.Tensor`):
Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction
requires a pre-calculated scaling factor.
Returns:
`torch.Tensor`: Symmetric-quantized value of *input*.
"""
zero_point = torch.tensor(0.0, device=scale.device)
n = 2 ** (k - 1) - 1
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
ctx.scale = scale
return new_quant_x
@staticmethod
def backward(ctx, grad_output):
scale = ctx.scale
if len(grad_output.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
elif len(grad_output.shape) == 2:
scale = scale.view(-1, 1)
else:
scale = scale.view(-1)
return (grad_output.clone() / scale, None, None, None, None)
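For reference, a minimal standalone sketch of symmetric quantization with a pre-computed scale (the scale formula below is an assumption mirroring `symmetric_linear_quantization_params`); note the same asymmetric clamp range `[-n, n-1]` as in `forward` above.
```python
import torch

x = torch.randn(8)
k = 8                                                # quantization bitwidth
n = 2 ** (k - 1) - 1
scale = x.abs().max() / n                            # assumed pre-calculated scale, zero_point = 0
q = torch.clamp(torch.round(x / scale), -n, n - 1)   # same clamp range as forward() above
print(q, (q * scale - x).abs().max())                # integer codes and reconstruction error
```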
|
class SymmetricQuantFunction(Function):
'''
Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth.
'''
@staticmethod
def forward(ctx, x, k, percentile_mode, scale):
'''
Args:
x (`torch.Tensor`):
Floating point tensor to be quantized.
k (`int`):
Quantization bitwidth.
percentile_mode (`bool`):
Whether or not to use percentile calibration.
scale (`torch.Tensor`):
Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction
requires a pre-calculated scaling factor.
Returns:
`torch.Tensor`: Symmetric-quantized value of *input*.
'''
pass
@staticmethod
def backward(ctx, grad_output):
pass
| 5
| 2
| 18
| 2
| 8
| 8
| 2
| 0.95
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 2
| 43
| 6
| 19
| 9
| 14
| 18
| 15
| 7
| 12
| 3
| 1
| 1
| 4
|
3,028
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.floor_ste
|
import torch
from torch.autograd import Function
class floor_ste(Function):
"""
Straight-through Estimator (STE) for torch.floor()
"""
@staticmethod
def forward(ctx, x):
return torch.floor(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
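The same straight-through behaviour can be written without a custom `Function`; a hypothetical equivalent (not used by the module) that may make the intent clearer:
```python
import torch

x = torch.tensor([1.3, 2.7], requires_grad=True)
y = x + (torch.floor(x) - x).detach()   # value equals floor(x); the gradient passes through as 1
y.sum().backward()
print(y, x.grad)                        # values [1., 2.], gradients [1., 1.]
```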
|
class floor_ste(Function):
'''
Straight-through Estimator (STE) for torch.floor()
'''
@staticmethod
def forward(ctx, x):
pass
@staticmethod
def backward(ctx, grad_output):
pass
| 5
| 1
| 2
| 0
| 2
| 0
| 1
| 0.43
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 2
| 12
| 2
| 7
| 5
| 2
| 3
| 5
| 3
| 2
| 1
| 1
| 0
| 2
|
3,029
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ibert/quant_modules.py
|
transformers.models.ibert.quant_modules.round_ste
|
from torch.autograd import Function
import torch
class round_ste(Function):
"""
Straight-through Estimator (STE) for torch.round()
"""
@staticmethod
def forward(ctx, x):
return torch.round(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
|
class round_ste(Function):
'''
Straight-through Estimator (STE) for torch.round()
'''
@staticmethod
def forward(ctx, x):
pass
@staticmethod
def backward(ctx, grad_output):
pass
| 5
| 1
| 2
| 0
| 2
| 0
| 1
| 0.43
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 2
| 12
| 2
| 7
| 5
| 2
| 3
| 5
| 3
| 2
| 1
| 1
| 0
| 2
|
3,030
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/configuration_idefics.py
|
transformers.models.idefics.configuration_idefics.IdeficsConfig
|
from ...configuration_utils import PretrainedConfig
class IdeficsConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
additional_vocab_size (`int`, *optional*, defaults to 0):
Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
are always trainable whereas regular vocab tokens can be frozen or not.
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`~IdeficsModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
Initialization type for the alphas.
alphas_initializer_range (`float`, *optional*, defaults to 0.0):
The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
Attention.
alpha_type (`str`, *optional*, defaults to `"float"`):
Whether the gating alphas should be vectors or single floats.
rms_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
cross_layer_interval (`int`, *optional*, defaults to 1):
Interval for cross attention (from text to image) layers.
qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing text layers when `freeze_text_layers` is `True`
freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
Example:
```python
>>> from transformers import IdeficsModel, IdeficsConfig
>>> # Initializing a Idefics idefics-9b style configuration
>>> configuration = IdeficsConfig()
>>> # Initializing a model from the idefics-9b style configuration
>>> model = IdeficsModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'idefics'
sub_configs = {'perceiver_config': IdeficsPerceiverConfig, 'vision_config': IdeficsVisionConfig}
def __init__(self, vocab_size=32000, additional_vocab_size=0, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, dropout=0.0, hidden_act='silu', initializer_range=0.02, alpha_initializer='zeros', alphas_initializer_range=0.0, alpha_type='float', rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, cross_layer_interval=1, qk_layer_norms=False, freeze_text_layers=True, freeze_text_module_exceptions=[], freeze_lm_head=False, freeze_vision_layers=True, freeze_vision_module_exceptions=[], use_resampler=False, vision_config=None, perceiver_config=None, **kwargs):
self.vocab_size = vocab_size
self.additional_vocab_size = additional_vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.alpha_initializer = alpha_initializer
self.alphas_initializer_range = alphas_initializer_range
self.alpha_type = alpha_type
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.cross_layer_interval = cross_layer_interval
self.qk_layer_norms = qk_layer_norms
self.freeze_vision_layers = freeze_vision_layers
self.freeze_text_layers = freeze_text_layers
self.freeze_text_module_exceptions = freeze_text_module_exceptions
self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
self.freeze_lm_head = freeze_lm_head
self.use_resampler = use_resampler
if perceiver_config is None:
self.perceiver_config = IdeficsPerceiverConfig()
elif isinstance(perceiver_config, dict):
self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
elif isinstance(perceiver_config, IdeficsPerceiverConfig):
self.perceiver_config = perceiver_config
if vision_config is None:
self.vision_config = IdeficsVisionConfig()
elif isinstance(vision_config, dict):
self.vision_config = IdeficsVisionConfig(**vision_config)
elif isinstance(vision_config, IdeficsVisionConfig):
self.vision_config = vision_config
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class IdeficsConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
additional_vocab_size (`int`, *optional*, defaults to 0):
Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
are always trainable whereas regular vocab tokens can be frozen or not.
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`~IdeficsModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
Initialization type for the alphas.
alphas_initializer_range (`float`, *optional*, defaults to 0.0):
The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
Attention.
alpha_type (`str`, *optional*, defaults to `"float"`):
Whether the gating alphas should be vectors or single floats.
rms_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
cross_layer_interval (`int`, *optional*, defaults to 1):
Interval for cross attention (from text to image) layers.
qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing text layers when `freeze_text_layers` is `True`
freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
Example:
```python
>>> from transformers import IdeficsModel, IdeficsConfig
>>> # Initializing a Idefics idefics-9b style configuration
>>> configuration = IdeficsConfig()
>>> # Initializing a model from the idefics-9b style configuration
>>> model = IdeficsModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32000, additional_vocab_size=0, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, dropout=0.0, hidden_act='silu', initializer_range=0.02, alpha_initializer='zeros', alphas_initializer_range=0.0, alpha_type='float', rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, cross_layer_interval=1, qk_layer_norms=False, freeze_text_layers=True, freeze_text_module_exceptions=[], freeze_lm_head=False, freeze_vision_layers=True, freeze_vision_module_exceptions=[], use_resampler=False, vision_config=None, perceiver_config=None, **kwargs):
pass
| 2
| 1
| 79
| 6
| 73
| 0
| 7
| 0.93
| 1
| 4
| 2
| 0
| 1
| 24
| 1
| 1
| 163
| 16
| 76
| 59
| 43
| 71
| 35
| 28
| 33
| 7
| 1
| 1
| 7
|
3,031
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/configuration_idefics.py
|
transformers.models.idefics.configuration_idefics.IdeficsPerceiverConfig
|
from ...configuration_utils import PretrainedConfig
class IdeficsPerceiverConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_resampler (`bool`, *optional*, defaults to `False`):
Whether or not to use the resampler
resampler_n_latents (`int`, *optional*, defaults to 64):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
resampler_depth (`int`, *optional*, defaults to 6):
Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
resampler_n_heads (`int`, *optional*, defaults to 16):
Number of heads in each Transformer block (for multi-headed self-attention).
resampler_head_dim (`int`, *optional*, defaults to 96):
Dimensionality of each head projection in the Transformer block.
qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
Whether or not to use qk layer norms in perceiver
"""
model_type = 'idefics_perciever'
def __init__(self, use_resampler=False, resampler_n_latents=64, resampler_depth=6, resampler_n_heads=16, resampler_head_dim=96, qk_layer_norms_perceiver=False, **kwargs):
self.use_resampler = use_resampler
self.resampler_n_latents = resampler_n_latents
self.resampler_depth = resampler_depth
self.resampler_n_heads = resampler_n_heads
self.resampler_head_dim = resampler_head_dim
self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
super().__init__(**kwargs)
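A short usage sketch: `IdeficsConfig.__init__` accepts this sub-config either as an `IdeficsPerceiverConfig` instance or as a plain dict and converts the latter.
```python
from transformers import IdeficsConfig

config = IdeficsConfig(perceiver_config={"use_resampler": True, "resampler_n_latents": 32})
print(type(config.perceiver_config).__name__, config.perceiver_config.resampler_n_latents)
# -> IdeficsPerceiverConfig 32
```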
|
class IdeficsPerceiverConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_resampler (`bool`, *optional*, defaults to `False`):
Whether or not to use the resampler
resampler_n_latents (`int`, *optional*, defaults to 64):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
resampler_depth (`int`, *optional*, defaults to 6):
Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
resampler_n_heads (`int`, *optional*, defaults to 16):
Number of heads in each Transformer block (for multi-headed self-attention).
resampler_head_dim (`int`, *optional*, defaults to 96):
Dimensionality of each head projection in the Transformer block.
qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
Whether or not to use qk layer norms in perceiver
'''
def __init__(self, use_resampler=False, resampler_n_latents=64, resampler_depth=6, resampler_n_heads=16, resampler_head_dim=96, qk_layer_norms_perceiver=False, **kwargs):
pass
| 2
| 1
| 18
| 1
| 17
| 0
| 1
| 1.11
| 1
| 1
| 0
| 0
| 1
| 6
| 1
| 1
| 46
| 6
| 19
| 18
| 8
| 21
| 10
| 9
| 8
| 1
| 1
| 0
| 1
|
3,032
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/configuration_idefics.py
|
transformers.models.idefics.configuration_idefics.IdeficsVisionConfig
|
from ...configuration_utils import PretrainedConfig
class IdeficsVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
embed_dim (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
intermediate_size (`int`, *optional*, defaults to 5120):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of image channels.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
"""
model_type = 'idefics_vision'
attribute_map = {'hidden_size': 'embed_dim'}
def __init__(self, embed_dim=768, image_size=224, intermediate_size=5120, patch_size=14, num_hidden_layers=32, num_attention_heads=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
self.embed_dim = embed_dim
self.image_size = image_size
self.intermediate_size = intermediate_size
self.patch_size = patch_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.hidden_act = hidden_act
super().__init__(**kwargs)
|
class IdeficsVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
embed_dim (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
intermediate_size (`int`, *optional*, defaults to 5120):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of image channels.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
'''
def __init__(self, embed_dim=768, image_size=224, intermediate_size=5120, patch_size=14, num_hidden_layers=32, num_attention_heads=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 30
| AvgCountLineBlank: 1
| AvgCountLineCode: 29
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.03
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 12
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 75
| CountLineBlank: 6
| CountLineCode: 34
| CountLineCodeDecl: 31
| CountLineCodeExe: 17
| CountLineComment: 35
| CountStmt: 17
| CountStmtDecl: 16
| CountStmtExe: 15
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
|
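A minimal sketch of the `attribute_map` behavior shown above (an illustration, not part of the record): `attribute_map = {'hidden_size': 'embed_dim'}` is a `PretrainedConfig` feature that lets both names resolve to the same value.

```python
# Minimal sketch: `hidden_size` is an alias of `embed_dim` via attribute_map.
from transformers.models.idefics.configuration_idefics import IdeficsVisionConfig

vision_config = IdeficsVisionConfig(embed_dim=768, image_size=224, patch_size=14)
assert vision_config.hidden_size == vision_config.embed_dim == 768
# Patch tokens per image with these defaults: (224 // 14) ** 2 = 256.
print((vision_config.image_size // vision_config.patch_size) ** 2)  # 256
```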
3,033
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/image_processing_idefics.py
|
transformers.models.idefics.image_processing_idefics.IdeficsImageProcessor
|
from ...utils import TensorType, is_torch_available
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, make_flat_list_of_images, to_numpy_array, valid_images
from typing import Callable, Optional, Union
class IdeficsImageProcessor(BaseImageProcessor):
"""
Constructs an Idefics image processor.
Args:
image_size (`int`, *optional*, defaults to 224):
Size to resize images to; each image is resized to a square of side `image_size`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
image_num_channels (`int`, *optional*, defaults to 3):
Number of image channels.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
"""
model_input_names = ['pixel_values']
def __init__(self, image_size: int=224, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, image_num_channels: Optional[int]=3, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, **kwargs) -> None:
super().__init__(**kwargs)
self.image_size = image_size
self.image_num_channels = image_num_channels
self.image_mean = image_mean if image_mean is not None else IDEFICS_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IDEFICS_STANDARD_STD
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
def preprocess(self, images: ImageInput, image_num_channels: Optional[int]=3, image_size: Optional[dict[str, int]]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, transform: Optional[Callable]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, return_tensors: Optional[Union[str, TensorType]]=TensorType.PYTORCH, **kwargs) -> TensorType:
"""
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
image_size (`int`, *optional*, defaults to `self.image_size`):
Size to resize images to; each image is resized to a square of side `image_size`.
image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
Number of image channels.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
transform (`Callable`, *optional*, defaults to `None`):
A custom transform function that accepts a single image can be passed for training. For example,
`torchvision.Compose` can be used to compose multiple transforms. If `None`, inference mode is assumed and a
preset of inference-specific transforms is applied to the images.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
Returns:
a PyTorch tensor of the processed images
"""
image_size = image_size if image_size is not None else self.image_size
image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
size = (image_size, image_size)
if isinstance(images, list) and len(images) == 0:
return []
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
if transform is not None:
if not is_torch_available():
raise ImportError('To pass in `transform` torch must be installed')
import torch
images = [transform(x) for x in images]
return torch.stack(images)
images = [convert_to_rgb(x) for x in images]
images = [to_numpy_array(x) for x in images]
images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
images = [self.rescale(image=image, scale=rescale_factor) for image in images]
images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images]
images = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)['pixel_values']
return images
|
class IdeficsImageProcessor(BaseImageProcessor):
'''
Constructs an Idefics image processor.
Args:
image_size (`int`, *optional*, defaults to 224):
Size to resize images to; each image is resized to a square of side `image_size`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
image_num_channels (`int`, *optional*, defaults to 3):
Number of image channels.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
'''
def __init__(self, image_size: int=224, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, image_num_channels: Optional[int]=3, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, **kwargs) -> None:
pass
def preprocess(self, images: ImageInput, image_num_channels: Optional[int]=3, image_size: Optional[dict[str, int]]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, transform: Optional[Callable]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, return_tensors: Optional[Union[str, TensorType]]=TensorType.PYTORCH, **kwargs) -> TensorType:
'''
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
image_size (`int`, *optional*, defaults to `self.image_size`):
Size to resize images to; each image is resized to a square of side `image_size`.
image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
Number of image channels.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
transform (`Callable`, *optional*, defaults to `None`):
A custom transform function that accepts a single image can be passed for training. For example,
`torchvision.Compose` can be used to compose multiple transforms. If `None`, inference mode is assumed and a
preset of inference-specific transforms is applied to the images.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
Returns:
a PyTorch tensor of the processed images
'''
pass
| total_program_units: 3
| total_doc_str: 2
| AvgCountLine: 48
| AvgCountLineBlank: 6
| AvgCountLineCode: 26
| AvgCountLineComment: 17
| AvgCyclomatic: 5
| CommentToCodeRatio: 0.94
| CountClassBase: 1
| CountClassCoupled: 9
| CountClassCoupledModified: 2
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 4
| CountDeclMethod: 2
| CountDeclMethodAll: 22
| CountLine: 118
| CountLineBlank: 15
| CountLineCode: 53
| CountLineCodeDecl: 27
| CountLineCodeExe: 32
| CountLineComment: 50
| CountStmt: 33
| CountStmtDecl: 10
| CountStmtExe: 29
| MaxCyclomatic: 9
| MaxInheritanceTree: 3
| MaxNesting: 2
| SumCyclomatic: 10
|
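The non-`transform` branch of `preprocess` above is a fixed pipeline: fetch, flatten, convert to RGB, bicubic resize to a square of `image_size`, rescale, normalize, move channels first, then batch. A minimal NumPy/PIL sketch of the per-image numeric steps; the mean/std values below are illustrative stand-ins for the `IDEFICS_STANDARD_MEAN`/`IDEFICS_STANDARD_STD` module constants, which this record does not show:

```python
# Minimal sketch of the per-image math in `preprocess`:
# resize -> rescale -> normalize -> channels-first.
import numpy as np
from PIL import Image

image_size = 224
mean = np.array([0.5, 0.5, 0.5])  # stand-in for IDEFICS_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5])   # stand-in for IDEFICS_STANDARD_STD

img = Image.new("RGB", (640, 480), color=(128, 64, 32))    # stand-in input image
img = img.resize((image_size, image_size), Image.BICUBIC)  # resize step
x = np.asarray(img).astype(np.float32) * (1 / 255)         # rescale step
x = (x - mean) / std                                       # normalize step
x = x.transpose(2, 0, 1)                                   # HWC -> CHW
print(x.shape)  # (3, 224, 224)
```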
3,034
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsAttention
|
from typing import Any, Callable, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PretrainedConfig, PreTrainedModel
from ...processing_utils import Unpack
class IdeficsAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, hidden_size: int, num_heads: int, dropout: float=0.0, is_cross_attention: bool=False, config: PretrainedConfig=None, qk_layer_norms: bool=False, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.hidden_size = hidden_size
self.num_heads = num_heads
self.head_dim = hidden_size // num_heads
self.dropout = dropout
self.is_causal = True
self.scaling = self.head_dim ** (-0.5)
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
if self.head_dim * num_heads != self.hidden_size:
raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {num_heads}).')
self.is_cross_attention = is_cross_attention
if not hasattr(nn.functional, 'scaled_dot_product_attention'):
raise ValueError('this model requires pytorch 2.0 or higher')
if self.is_cross_attention:
kv_input_dim = self.hidden_size if not hasattr(config.vision_config, 'embed_dim') else config.vision_config.embed_dim
self.q_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False)
else:
self.q_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(num_heads * self.head_dim, hidden_size, bias=False)
self.rotary_emb = IdeficsEmbedding(self.head_dim)
self.qk_layer_norms = qk_layer_norms
if self.qk_layer_norms:
self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
is_cross_attention = self.is_cross_attention or key_value_states is not None
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
if not is_cross_attention:
key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
else:
_, kv_len, _ = key_value_states.size()
key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_values is not None:
kv_seq_len += cache_position[0]
if not is_cross_attention:
cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len))
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
if past_key_values is not None:
cache_kwargs = {'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
if self.qk_layer_norms:
query_states = self.q_layer_norm(query_states)
key_states = self.k_layer_norm(key_states)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class IdeficsAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, hidden_size: int, num_heads: int, dropout: float=0.0, is_cross_attention: bool=False, config: PretrainedConfig=None, qk_layer_norms: bool=False, layer_idx: Optional[int]=None):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| total_program_units: 5
| total_doc_str: 1
| AvgCountLine: 56
| AvgCountLineBlank: 7
| AvgCountLineCode: 46
| AvgCountLineComment: 3
| AvgCyclomatic: 7
| CommentToCodeRatio: 0.07
| CountClassBase: 1
| CountClassCoupled: 8
| CountClassCoupledModified: 2
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 15
| CountDeclMethod: 3
| CountDeclMethodAll: 13
| CountLine: 173
| CountLineBlank: 24
| CountLineCode: 140
| CountLineCodeDecl: 52
| CountLineCodeExe: 117
| CountLineComment: 10
| CountStmt: 72
| CountStmtDecl: 33
| CountStmtExe: 68
| MaxCyclomatic: 12
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 20
|
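A minimal sketch (not from the source) of the shape bookkeeping in `forward` above: project to `(bsz, num_heads, seq_len, head_dim)`, run scaled dot-product attention, and merge the heads back. RoPE, QK layer norms and the KV cache are omitted for brevity.

```python
import torch
import torch.nn.functional as F
from torch import nn

bsz, q_len, hidden_size, num_heads = 2, 5, 32, 4
head_dim = hidden_size // num_heads  # __init__ raises if this does not divide evenly

x = torch.randn(bsz, q_len, hidden_size)
q_proj, k_proj, v_proj = (nn.Linear(hidden_size, hidden_size, bias=False) for _ in range(3))

# (bsz, q_len, hidden) -> (bsz, num_heads, q_len, head_dim), as in forward()
q = q_proj(x).view(bsz, q_len, num_heads, head_dim).transpose(1, 2)
k = k_proj(x).view(bsz, q_len, num_heads, head_dim).transpose(1, 2)
v = v_proj(x).view(bsz, q_len, num_heads, head_dim).transpose(1, 2)

# Default scale is head_dim ** -0.5, matching `self.scaling` above.
attn = F.scaled_dot_product_attention(q, k, v)
out = attn.transpose(1, 2).reshape(bsz, q_len, hidden_size)  # merge heads
print(out.shape)  # torch.Size([2, 5, 32])
```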
3,035
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsBaseModelOutputWithPast
|
from ...modeling_outputs import ModelOutput
from typing import Any, Callable, Optional, Union
from dataclasses import dataclass
import torch
import torch.nn.functional as F
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@dataclass
@auto_docstring(custom_intro="\n Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class IdeficsBaseModelOutputWithPast(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class IdeficsBaseModelOutputWithPast(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
'''
pass
| total_program_units: 3
| total_doc_str: 1
| AvgCountLine: 0
| AvgCountLineBlank: 0
| AvgCountLineCode: 0
| AvgCountLineComment: 0
| AvgCyclomatic: 0
| CommentToCodeRatio: 4.83
| CountClassBase: 1
| CountClassCoupled: 0
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 0
| CountDeclInstanceVariable: 0
| CountDeclMethod: 0
| CountDeclMethodAll: 0
| CountLine: 42
| CountLineBlank: 7
| CountLineCode: 6
| CountLineCodeDecl: 6
| CountLineCodeExe: 5
| CountLineComment: 29
| CountStmt: 6
| CountStmtDecl: 6
| CountStmtExe: 5
| MaxCyclomatic: 0
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 0
|
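A minimal sketch (an illustration, not from the source) of how `ModelOutput` subclasses like the one above behave: they allow both attribute and tuple-style access, and fields left as `None` are dropped from the tuple view.

```python
from dataclasses import dataclass
from typing import Optional

import torch
from transformers.modeling_outputs import ModelOutput

@dataclass
class TinyOutput(ModelOutput):  # hypothetical toy output, not from the record
    last_hidden_state: Optional[torch.FloatTensor] = None
    image_hidden_states: Optional[torch.FloatTensor] = None

out = TinyOutput(last_hidden_state=torch.zeros(1, 4, 8))
print(out.last_hidden_state.shape)  # attribute access: torch.Size([1, 4, 8])
print(out[0].shape)                 # index access; None fields are skipped
print(out.to_tuple())               # tuple of the non-None fields
```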
3,036
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast
|
from typing import Any, Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...cache_utils import Cache, DynamicCache
import torch
import torch.nn.functional as F
from ...modeling_outputs import ModelOutput
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Base class for Idefics causal language model (or autoregressive) outputs.\n ')
class IdeficsCausalLMOutputWithPast(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for Idefics causal language model (or autoregressive) outputs.\n ')
class IdeficsCausalLMOutputWithPast(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
'''
pass
| total_program_units: 3
| total_doc_str: 1
| AvgCountLine: 0
| AvgCountLineBlank: 0
| AvgCountLineCode: 0
| AvgCountLineComment: 0
| AvgCyclomatic: 0
| CommentToCodeRatio: 3.71
| CountClassBase: 1
| CountClassCoupled: 0
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 0
| CountDeclInstanceVariable: 0
| CountDeclMethod: 0
| CountDeclMethodAll: 0
| CountLine: 39
| CountLineBlank: 6
| CountLineCode: 7
| CountLineCodeDecl: 7
| CountLineCodeExe: 6
| CountLineComment: 26
| CountStmt: 7
| CountStmtDecl: 7
| CountStmtExe: 6
| MaxCyclomatic: 0
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 0
|
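A minimal sketch of how `loss` relates to `logits` and `labels` as the docstring above describes: logits are shifted against next-token labels and `-100` positions are ignored. This mirrors the usual causal-LM recipe, not the exact `loss_function` implementation.

```python
import torch
import torch.nn.functional as F

vocab_size = 11
logits = torch.randn(2, 6, vocab_size)        # (batch, seq, vocab)
labels = torch.randint(0, vocab_size, (2, 6))
labels[:, :2] = -100                          # masked positions are ignored

shift_logits = logits[:, :-1, :].reshape(-1, vocab_size)  # predict token t+1 from t
shift_labels = labels[:, 1:].reshape(-1)
loss = F.cross_entropy(shift_logits, shift_labels, ignore_index=-100)
print(loss.shape)  # scalar
```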
3,037
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsDecoderLayer
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from .configuration_idefics import IdeficsConfig
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache
import torch.nn.functional as F
from torch import nn
from typing import Any, Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
import torch
class IdeficsDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: IdeficsConfig, layer_idx: Optional[int]=None):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = IdeficsAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, dropout=config.dropout, config=config, layer_idx=layer_idx)
self.mlp = IdeficsMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act)
self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.dropout = config.dropout
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
return hidden_states
|
class IdeficsDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: IdeficsConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
| total_program_units: 5
| total_doc_str: 0
| AvgCountLine: 38
| AvgCountLineBlank: 4
| AvgCountLineCode: 26
| AvgCountLineComment: 8
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.28
| CountClassBase: 1
| CountClassCoupled: 8
| CountClassCoupledModified: 4
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 6
| CountDeclMethod: 2
| CountDeclMethodAll: 12
| CountLine: 77
| CountLineBlank: 9
| CountLineCode: 53
| CountLineCodeDecl: 21
| CountLineCodeExe: 41
| CountLineComment: 15
| CountStmt: 26
| CountStmtDecl: 12
| CountStmtExe: 23
| MaxCyclomatic: 3
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 4
|
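A minimal sketch of the pre-norm residual pattern that `forward` above applies twice (once around self-attention, once around the MLP): normalize, transform, dropout, add back the residual. `LayerNorm` and `Linear` stand in for `IdeficsRMSNorm` and the attention/MLP blocks.

```python
import torch
from torch import nn

hidden_states = torch.randn(2, 5, 32)
norm = nn.LayerNorm(32)      # stand-in for IdeficsRMSNorm
block = nn.Linear(32, 32)    # stand-in for self-attention or the MLP
dropout = nn.Dropout(p=0.1)

residual = hidden_states
hidden_states = norm(hidden_states)       # pre-norm
hidden_states = block(hidden_states)      # transform
hidden_states = dropout(hidden_states)
hidden_states = residual + hidden_states  # residual connection
print(hidden_states.shape)  # torch.Size([2, 5, 32])
```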
3,038
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsDecoupledEmbedding
|
from torch import nn
import torch.nn.functional as F
import torch
from typing import Any, Callable, Optional, Union
class IdeficsDecoupledEmbedding(nn.Embedding):
"""
Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0,
then it will create `num_additional_embeddings` additional parameters that are always trained. If
`num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
"""
def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, device=None, dtype=None, padding_idx=None, **kwargs) -> None:
"""
Args:
num_embeddings (`int`):
Size of the dictionary of embeddings
num_additional_embeddings (`int`):
Number of additional embeddings. Only useful when `partially_freeze=True`.
embedding_dim (`int`):
The size of each embedding vector
partially_freeze (`bool`, *optional*, defaults to `False`):
If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
padding_idx (`int`, *optional*):
The padding index (needs to be less than num_embeddings)
Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `max_norm` or
`norm_type`. We are not supporting these.
"""
if padding_idx is not None and padding_idx > num_embeddings:
raise ValueError(f'padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}')
super().__init__(num_embeddings=num_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype, padding_idx=padding_idx, **kwargs)
self.num_embeddings = num_embeddings
self.padding_idx = padding_idx
self.num_additional_embeddings = num_additional_embeddings
self.partially_freeze = partially_freeze
if partially_freeze:
self.weight.requires_grad_(False)
if self.num_additional_embeddings > 0:
self.additional_embedding = nn.Embedding(num_embeddings=self.num_additional_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype)
def forward(self, input_ids):
"""
we have 2 embeddings, with different indices - one pretrained self.weight and another
self.additional_embedding.weight that is being trained.
in order to make a lookup of the input ids, we:
1. find out the indices of the entries belonging to the 2nd embedding
2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd
embedding starts from 0 and not num_embeddings
3. perform the 2nd embedding lookup
4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index
5. perform the 1st embedding lookup
6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup
note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but
then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices -
i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are
usually relatively short it's probably not faster or if faster not by much - but might be a good idea to
measure.
"""
if self.num_additional_embeddings == 0:
return F.embedding(input_ids, self.weight)
input_ids = input_ids.clone()
additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
input_ids_additional_vocab = input_ids[additional_vocab_indices]
additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings)
input_ids[additional_vocab_indices] = 0
full_vector = F.embedding(input_ids, self.weight)
full_vector[additional_vocab_indices] = additional_embeddings
return full_vector
def extra_repr(self) -> str:
return f'num_embeddings={self.num_embeddings}, num_additional_embeddings={self.num_additional_embeddings}, embedding_dim={self.embedding_dim}, partially_freeze={self.partially_freeze}'
|
class IdeficsDecoupledEmbedding(nn.Embedding):
'''
Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0,
then it will create `num_additional_embeddings` additional parameters that are always trained. If
`num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
'''
def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, device=None, dtype=None, padding_idx=None, **kwargs) -> None:
'''
Args:
num_embeddings (`int`):
Size of the dictionary of embeddings
num_additional_embeddings (`int`):
Number of additional embeddings. Only useful when `partially_freeze=True`.
embedding_dim (`int`):
The size of each embedding vector
partially_freeze (`bool`, *optional*, defaults to `False`):
If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
padding_idx (`int`, *optional*):
The padding index (needs to be less than num_embeddings)
Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `max_norm` or
`norm_type`. We are not supporting these.
'''
pass
def forward(self, input_ids):
'''
we have 2 embeddings, with different indices - one pretrained self.weight and another
self.additional_embedding.weight that is being trained.
in order to make a lookup of the input ids, we:
1. find out the indices of the entries belonging to the 2nd embedding
2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd
embedding starts from 0 and not num_embeddings
3. perform the 2nd embedding lookup
4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index
5. perform the 1st embedding lookup
6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup
note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but
then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices -
i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are
usually relatively short it's probably not faster or if faster not by much - but might be a good idea to
measure.
'''
pass
def extra_repr(self) -> str:
pass
| total_program_units: 4
| total_doc_str: 3
| AvgCountLine: 32
| AvgCountLineBlank: 3
| AvgCountLineCode: 17
| AvgCountLineComment: 12
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.79
| CountClassBase: 1
| CountClassCoupled: 4
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 5
| CountDeclMethod: 3
| CountDeclMethodAll: 3
| CountLine: 108
| CountLineBlank: 13
| CountLineCode: 53
| CountLineCodeDecl: 23
| CountLineCodeExe: 39
| CountLineComment: 42
| CountStmt: 26
| CountStmtDecl: 13
| CountStmtExe: 22
| MaxCyclomatic: 4
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 7
|
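A minimal standalone sketch of the six-step lookup described in `forward()`'s docstring above: ids below `num_embeddings` hit the (possibly frozen) main table, ids at or above it hit the always-trainable additional table.

```python
import torch
import torch.nn.functional as F
from torch import nn

num_embeddings, num_additional, dim = 10, 3, 4
main = nn.Embedding(num_embeddings, dim)
extra = nn.Embedding(num_additional, dim)

input_ids = torch.tensor([[1, 9, 10, 12]])                # 10 and 12 are "additional"
ids = input_ids.clone()
additional_idx = torch.where(ids >= num_embeddings)       # 1. find additional ids
extra_vecs = extra(ids[additional_idx] - num_embeddings)  # 2-3. offset + 2nd lookup
ids[additional_idx] = 0                                   # 4. overwrite with a pad id
out = F.embedding(ids, main.weight)                       # 5. main lookup
out[additional_idx] = extra_vecs                          # 6. splice in extra vectors
print(out.shape)  # torch.Size([1, 4, 4])
```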
3,039
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsDecoupledLinear
|
from torch import nn
import torch.nn.functional as F
import torch
class IdeficsDecoupledLinear(nn.Linear):
"""
Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
then it will create `out_additional_features * in_features` additional parameters that are always trained. If
`out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
"""
def __init__(self, in_features: int, out_features: int, out_additional_features: int=0, bias: bool=True, partially_freeze: bool=True, device=None, dtype=None) -> None:
"""
out_additional_features: int. Number of additional trainable dimensions. Only makes sense when
`partially_freeze=True`.
partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be
trainable. If False, defaults to the regular behavior of nn.Linear.
"""
super().__init__(in_features, out_features, bias, device, dtype)
self.out_additional_features = out_additional_features
self.partially_freeze = partially_freeze
self.in_features = in_features
self.out_features = out_features
if partially_freeze:
self.weight.requires_grad_(False)
if bias:
self.bias.requires_grad_(False)
if out_additional_features > 0:
self.additional_fc = nn.Linear(in_features=in_features, out_features=out_additional_features, bias=bias, device=device, dtype=dtype)
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = F.linear(input, self.weight, self.bias)
if self.out_additional_features > 0:
additional_features = self.additional_fc(input)
output = torch.cat((output, additional_features), -1)
return output
def extra_repr(self) -> str:
"""Overwriting `nn.Linear.extra_repr` to include new parameters."""
return f'in_features={self.in_features}, out_features={self.out_features}, out_additional_features={self.out_additional_features}, bias={self.bias is not None}, partially_freeze={self.partially_freeze}'
|
class IdeficsDecoupledLinear(nn.Linear):
'''
Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
then it will create `out_additional_features * in_features` additional parameters that are always trained. If
`out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
'''
def __init__(self, in_features: int, out_features: int, out_additional_features: int=0, bias: bool=True, partially_freeze: bool=True, device=None, dtype=None) -> None:
'''
out_additional_features: int. Number of additional trainable dimensions. Only makes sense when
`partially_freeze=True`.
partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be
trainable. If False, defaults to the regular behavior of nn.Linear.
'''
pass
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
'''Overwriting `nn.Linear.extra_repr` to include new parameters.'''
pass
| total_program_units: 4
| total_doc_str: 3
| AvgCountLine: 17
| AvgCountLineBlank: 2
| AvgCountLineCode: 14
| AvgCountLineComment: 2
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.31
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 5
| CountDeclMethod: 3
| CountDeclMethodAll: 3
| CountLine: 63
| CountLineBlank: 8
| CountLineCode: 42
| CountLineCodeDecl: 20
| CountLineCodeExe: 29
| CountLineComment: 13
| CountStmt: 21
| CountStmtDecl: 11
| CountStmtExe: 17
| MaxCyclomatic: 4
| MaxInheritanceTree: 1
| MaxNesting: 2
| SumCyclomatic: 7
|
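A minimal sketch of the decoupled linear above: the base projection is frozen, `additional_fc` stays trainable, and their outputs are concatenated on the last dimension.

```python
import torch
from torch import nn

in_features, out_features, out_additional = 8, 6, 2
base = nn.Linear(in_features, out_features)
base.weight.requires_grad_(False)             # partially_freeze=True branch
base.bias.requires_grad_(False)
additional_fc = nn.Linear(in_features, out_additional)  # always trainable

x = torch.randn(3, in_features)
output = torch.cat((base(x), additional_fc(x)), dim=-1)
print(output.shape)  # torch.Size([3, 8]) == out_features + out_additional
```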
3,040
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsEmbedding
|
import torch.nn.functional as F
import torch
class IdeficsEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / self.dim)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self._set_cos_sin_cache(seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype())
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
freqs = torch.einsum('i,j->ij', t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
def forward(self, x, seq_len=None):
if seq_len > self.max_seq_len_cached:
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
return (self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype))
|
class IdeficsEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
pass
def _set_cos_sin_cache(self, seq_len, device, dtype):
pass
def forward(self, x, seq_len=None):
pass
| total_program_units: 4
| total_doc_str: 0
| AvgCountLine: 10
| AvgCountLineBlank: 1
| AvgCountLineCode: 8
| AvgCountLineComment: 1
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.12
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 4
| CountDeclMethod: 3
| CountDeclMethodAll: 13
| CountLine: 34
| CountLineBlank: 6
| CountLineCode: 25
| CountLineCodeDecl: 12
| CountLineCodeExe: 21
| CountLineComment: 3
| CountStmt: 20
| CountStmtDecl: 12
| CountStmtExe: 16
| MaxCyclomatic: 2
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 4
|
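A minimal sketch of the rotary-embedding cache built by `_set_cos_sin_cache` above: an outer product of positions and inverse frequencies, duplicated along the last dimension, then turned into cos/sin tables that `forward()` slices by `seq_len`.

```python
import torch

dim, base, seq_len = 8, 10000, 6
inv_freq = 1.0 / base ** (torch.arange(0, dim, 2).float() / dim)  # (dim // 2,)
t = torch.arange(seq_len).float()                                 # positions
freqs = torch.einsum("i,j->ij", t, inv_freq)                      # (seq, dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)                           # (seq, dim)
cos_cached, sin_cached = emb.cos(), emb.sin()
print(cos_cached.shape, cos_cached[0])  # torch.Size([6, 8]); all ones at position 0
```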
3,041
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsForVisionText2Text
|
from ...generation import GenerationMixin
from ...modeling_outputs import ModelOutput
import torch.nn.functional as F
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
import torch
from ...cache_utils import Cache, DynamicCache
from typing import Any, Callable, Optional, Union
class IdeficsForVisionText2Text(IdeficsPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['model.embed_tokens.weight', 'lm_head.weight']
def __init__(self, config, vision_model=None):
super().__init__(config)
self.model = IdeficsModel(config)
self.lm_head = IdeficsDecoupledLinear(in_features=config.hidden_size, out_features=config.vocab_size, out_additional_features=config.additional_vocab_size, bias=False, partially_freeze=config.freeze_lm_head)
self.post_init()
def tie_weights(self):
"""
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of
IdeficsDecoupledLinear and IdeficsDecoupledEmbedding.
"""
output_embeddings = self.get_output_embeddings()
input_embeddings = self.get_input_embeddings()
if getattr(self.config, 'tie_word_embeddings', True):
output_embeddings.weight = input_embeddings.weight
if input_embeddings.num_additional_embeddings > 0:
assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight
if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'):
output_embeddings.out_features = input_embeddings.num_embeddings
if hasattr(output_embeddings, 'out_additional_features') and hasattr(input_embeddings, 'num_additional_embeddings'):
output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_encoder_embeddings: Optional[torch.FloatTensor]=None, perceiver_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, IdeficsCausalLMOutputWithPast]:
"""
image_encoder_embeddings (`torch.FloatTensor`, *optional*):
The output of the image encoder.
perceiver_embeddings (`torch.FloatTensor`, *optional*):
The output of the perceiver resampler.
image_attention_mask (`torch.LongTensor`, *optional*):
The attention mask for the image encoder.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoProcessor, IdeficsForVisionText2Text
>>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b")
>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
>>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg"
>>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg"
>>> prompts = [
... [
... "User:",
... dogs_image_url_1,
... "Describe this image.\\nAssistant: An image of two dogs.\\n",
... "User:",
... dogs_image_url_2,
... "Describe this image.\\nAssistant:",
... ]
... ]
>>> inputs = processor(prompts, return_tensors="pt")
>>> generate_ids = model.generate(**inputs, max_new_tokens=6)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True)
```"""
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, cache_position=cache_position, **kwargs)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return IdeficsCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values=None, cache_position=None, pixel_values=None, image_hidden_states=None, image_attention_mask=None, use_cache=None, **kwargs):
images_kwargs = {}
if image_hidden_states is not None:
if self.config.use_resampler:
images_kwargs['perceiver_embeddings'] = image_hidden_states
else:
images_kwargs['image_encoder_embeddings'] = image_hidden_states
else:
images_kwargs['pixel_values'] = pixel_values
images_kwargs['interpolate_pos_encoding'] = kwargs.pop('interpolate_pos_encoding', False)
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, image_attention_mask=image_attention_mask, **images_kwargs, **kwargs)
if image_attention_mask is not None and inputs_embeds is None:
seq_length = model_inputs['input_ids'].shape[1]
model_inputs['image_attention_mask'] = image_attention_mask[:, -seq_length:]
return model_inputs
def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool=False, **kwargs) -> dict[str, Any]:
model_kwargs = super()._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder, **kwargs)
if 'image_attention_mask' in model_kwargs:
image_attention_mask = model_kwargs['image_attention_mask']
last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
if model_kwargs.get('use_cache', True):
model_kwargs['image_attention_mask'] = last_mask
else:
model_kwargs['image_attention_mask'] = torch.cat([image_attention_mask, last_mask], dim=1)
model_kwargs['image_hidden_states'] = outputs.image_hidden_states
return model_kwargs
|
class IdeficsForVisionText2Text(IdeficsPreTrainedModel, GenerationMixin):
def __init__(self, config, vision_model=None):
pass
def tie_weights(self):
'''
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of
IdeficsDecoupledLinear and IdeficsDecoupledEmbedding.
'''
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_encoder_embeddings: Optional[torch.FloatTensor]=None, perceiver_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, IdeficsCausalLMOutputWithPast]:
'''
image_encoder_embeddings (`torch.FloatTensor`, *optional*):
The output of the image encoder.
perceiver_embeddings (`torch.FloatTensor`, *optional*):
The output of the perceiver resampler.
image_attention_mask (`torch.LongTensor`, *optional*):
The attention mask for the image encoder.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoProcessor, IdeficsForVisionText2Text
>>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b")
>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
>>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg"
>>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg"
>>> prompts = [
... [
... "User:",
... dogs_image_url_1,
... "Describe this image.\nAssistant: An image of two dogs.\n",
... "User:",
... dogs_image_url_2,
... "Describe this image.\nAssistant:",
... ]
... ]
>>> inputs = processor(prompts, return_tensors="pt")
>>> generate_ids = model.generate(**inputs, max_new_tokens=6)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True)
```'''
pass
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values=None, cache_position=None, pixel_values=None, image_hidden_states=None, image_attention_mask=None, use_cache=None, **kwargs):
pass
def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool=False, **kwargs) -> dict[str, Any]:
pass
| total_program_units: 8
| total_doc_str: 2
| AvgCountLine: 22
| AvgCountLineBlank: 2
| AvgCountLineCode: 16
| AvgCountLineComment: 4
| AvgCyclomatic: 3
| CommentToCodeRatio: 0.24
| CountClassBase: 2
| CountClassCoupled: 9
| CountClassCoupledModified: 3
| CountClassDerived: 0
| CountDeclInstanceMethod: 11
| CountDeclInstanceVariable: 2
| CountDeclMethod: 12
| CountDeclMethodAll: 13
| CountLine: 279
| CountLineBlank: 38
| CountLineCode: 195
| CountLineCodeDecl: 72
| CountLineCodeExe: 142
| CountLineComment: 46
| CountStmt: 97
| CountStmtDecl: 33
| CountStmtExe: 84
| MaxCyclomatic: 12
| MaxInheritanceTree: 2
| MaxNesting: 3
| SumCyclomatic: 37
|
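A minimal sketch of the `image_attention_mask` bookkeeping in `_update_model_kwargs_for_generation` above: with a KV cache only the mask for the newest token is kept, otherwise it is appended to the running mask.

```python
import torch

image_attention_mask = torch.ones(2, 4, 3)   # (batch, seq_len, num_images)
last_mask = image_attention_mask[:, -1, :].unsqueeze(1)

with_cache = last_mask                                              # use_cache=True
without_cache = torch.cat([image_attention_mask, last_mask], dim=1)  # use_cache=False
print(with_cache.shape, without_cache.shape)  # (2, 1, 3) (2, 5, 3)
```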
3,042
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsGatedCrossAttentionLayer
|
import torch
from ...cache_utils import Cache, DynamicCache
from typing import Any, Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.deprecation import deprecate_kwarg
import torch.nn.functional as F
from .configuration_idefics import IdeficsConfig
from torch import nn
from ...processing_utils import Unpack
class IdeficsGatedCrossAttentionLayer(GradientCheckpointingLayer):
def __init__(self, config: IdeficsConfig, layer_idx: Optional[int]=None):
super().__init__()
self.hidden_size = config.hidden_size
self.cross_attn = IdeficsAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, is_cross_attention=True, dropout=config.dropout, config=config, qk_layer_norms=config.qk_layer_norms, layer_idx=layer_idx)
self.mlp = IdeficsMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act)
self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.config = config.dropout
self.act_cross_attn = nn.Tanh()
self.act_dense = nn.Tanh()
if config.alpha_initializer == 'zeros':
if config.alpha_type == 'vector':
self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
elif config.alpha_type == 'float':
self.alpha_cross_attn = nn.Parameter(torch.zeros(1))
self.alpha_dense = nn.Parameter(torch.zeros(1))
else:
raise ValueError(f'Unknown value for `alpha_type` ({config.alpha_type})')
elif config.alpha_initializer == 'ones':
if config.alpha_type == 'vector':
self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size))
self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size))
elif config.alpha_type == 'float':
self.alpha_cross_attn = nn.Parameter(torch.ones(1))
self.alpha_dense = nn.Parameter(torch.ones(1))
else:
raise ValueError(f'Unknown value for `alpha_type` ({config.alpha_type})')
elif config.alpha_initializer in {'normal', 'gaussian', 'random'}:
if config.alpha_type == 'vector':
self.alpha_cross_attn = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size)))
self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size)))
elif config.alpha_type == 'float':
self.alpha_cross_attn = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,)))
self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,)))
else:
raise ValueError(f'Unknown value for `alpha_type` ({config.alpha_type})')
else:
raise NotImplementedError(f'Alpha initialization scheme {config.alpha_initializer} not yet implemented!')
if not (hasattr(self, 'alpha_cross_attn') and hasattr(self, 'alpha_dense')):
raise ValueError('Alpha parameters not initialized correctly!')
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, image_hidden_states: Optional[torch.Tensor]=None, image_attention_mask: Optional[torch.Tensor]=None, cross_attention_gate: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
"""
image_hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`
image_attention_mask (`torch.FloatTensor`, *optional*):
image attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
cross_attention_gate (`torch.FloatTensor`, *optional*):
gate of size `(batch, seq_len)` used to zero out the cross-attention output for tokens attending to no images.
"""
if image_hidden_states is None:
raise ValueError('`image_hidden_states` is required for the Idefics cross attention module, as it provides the visual features to condition on.')
if cross_attention_gate is None:
raise ValueError('`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images.')
if past_key_values is not None:
raise NotImplementedError('Past key value states are not implemented for Idefics cross attention module.')
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.cross_attn(hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, **kwargs)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states.masked_fill((cross_attention_gate == 0)[:, :, None], 0.0)
hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
return hidden_states
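A minimal sketch (not from the source) of the tanh-gated residual update this layer applies twice; with the `zeros` alpha initializer, the block starts out as an identity mapping:

```python
# Toy sketch of the gated residual above; all dims are assumptions.
import torch

batch, seq, hidden = 2, 5, 8
residual = torch.randn(batch, seq, hidden)
block_out = torch.randn(batch, seq, hidden)   # stands in for cross-attn / MLP output
alpha = torch.zeros(1, 1, hidden)             # alpha_type == "vector", zeros init

gated = residual + torch.tanh(alpha) * block_out
assert torch.equal(gated, residual)           # tanh(0) == 0: identity at init
```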
|
class IdeficsGatedCrossAttentionLayer(GradientCheckpointingLayer):
def __init__(self, config: IdeficsConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, image_hidden_states: Optional[torch.Tensor]=None, image_attention_mask: Optional[torch.Tensor]=None, cross_attention_gate: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
'''
image_hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`
image_attention_mask (`torch.FloatTensor`, *optional*):
image attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
cross_attention_gate (`torch.FloatTensor`, *optional*):
gate of size `(batch, seq_len)` used to zero out the cross-attention output for tokens attending to no images.
'''
pass
| 5
| 1
| 70
| 8
| 52
| 10
| 9
| 0.19
| 1
| 10
| 4
| 0
| 2
| 10
| 2
| 12
| 141
| 17
| 104
| 26
| 91
| 20
| 57
| 16
| 54
| 11
| 1
| 2
| 17
|
3,043
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsMLP
|
from ...activations import ACT2FN
from torch import nn
class IdeficsMLP(nn.Module):
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str):
super().__init__()
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
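The forward above is the gated-MLP (SwiGLU-style) pattern; a self-contained sketch with assumed sizes, using `silu` as a stand-in for whatever `ACT2FN[hidden_act]` resolves to:

```python
import torch
import torch.nn.functional as F
from torch import nn

hidden_size, intermediate_size = 8, 32        # assumed toy sizes
gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

x = torch.randn(2, 5, hidden_size)
out = down_proj(F.silu(gate_proj(x)) * up_proj(x))  # same dataflow as forward()
assert out.shape == x.shape
```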
|
class IdeficsMLP(nn.Module):
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str):
pass
def forward(self, x):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 4
| 2
| 12
| 15
| 1
| 14
| 12
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
3,044
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsModel
|
from typing import Any, Callable, Optional, Union
from ...utils.generic import OutputRecorder, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
import torch.nn.functional as F
from ...masking_utils import create_causal_mask
import torch
from ...processing_utils import Unpack
from torch import nn
from .perceiver import IdeficsPerceiverResampler
from ...cache_utils import Cache, DynamicCache
from .configuration_idefics import IdeficsConfig
from .vision import IdeficsVisionEmbeddings, IdeficsVisionTransformer
@auto_docstring
class IdeficsModel(IdeficsPreTrainedModel):
"""
Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`]
Args:
config: IdeficsConfig
"""
def __init__(self, config: IdeficsConfig):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = IdeficsDecoupledEmbedding(num_embeddings=config.vocab_size, num_additional_embeddings=config.additional_vocab_size, embedding_dim=config.hidden_size, partially_freeze=config.freeze_text_layers, padding_idx=self.padding_idx)
self.image_size = config.vision_config.image_size
self.vision_config = config.vision_config
self.vision_config._attn_implementation = config._attn_implementation
self.vision_model = IdeficsVisionTransformer(config.vision_config)
if config.use_resampler:
perceiver_config = config.perceiver_config
self.perceiver_resampler = IdeficsPerceiverResampler(config, config.vision_config.embed_dim, perceiver_config.resampler_depth, perceiver_config.resampler_n_heads, perceiver_config.resampler_head_dim, perceiver_config.resampler_n_latents)
self.layers = nn.ModuleList([IdeficsDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.cross_layer_interval = config.cross_layer_interval
num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
self.gated_cross_attn_layers = nn.ModuleList([IdeficsGatedCrossAttentionLayer(config, layer_idx=i) for i in range(num_cross_layers)])
self.gradient_checkpointing = False
self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_init()
self.freeze_relevant_params(config)
def freeze_relevant_params(self, config=None):
if config is None:
config = self.config
if config.freeze_text_layers:
self.freeze_text_layers(config.freeze_text_module_exceptions)
if config.freeze_vision_layers:
freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
def freeze_text_layers(self, module_exceptions=[]):
for module in [self.layers, self.norm]:
freeze_model(module, module_exceptions=module_exceptions)
def freeze_vision_layers(self, module_exceptions=[]):
freeze_model(self.vision_model, module_exceptions=module_exceptions)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_encoder_embeddings: Optional[torch.FloatTensor]=None, perceiver_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, IdeficsBaseModelOutputWithPast]:
"""
image_encoder_embeddings (`torch.FloatTensor`, *optional*):
The output of the image encoder.
perceiver_embeddings (`torch.FloatTensor`, *optional*):
The output of the perceiver resampler.
image_attention_mask (`torch.LongTensor`, *optional*):
The attention mask for the image encoder.
"""
device = input_ids.device if input_ids is not None else inputs_embeds.device
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
batch_size, seq_length, _ = inputs_embeds.shape
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
seq_length_with_past = seq_length + past_key_values_length
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + inputs_embeds.shape[1], device=inputs_embeds.device)
if attention_mask is not None and position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids[:, -seq_length:]
elif position_ids is None:
position_ids = cache_position.unsqueeze(0)
if sum([x is None for x in [pixel_values, image_encoder_embeddings, perceiver_embeddings]]) != 2:
raise ValueError('Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None.')
elif pixel_values is not None:
pixel_values = pixel_values.to(dtype=self.dtype, device=device)
batch_size, num_images = pixel_values.shape[:2]
pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
image_hidden_states = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state
elif image_encoder_embeddings is not None:
batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size()
image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device)
image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)
if self.config.use_resampler:
if perceiver_embeddings is None:
perceiver_embeddings = self.perceiver_resampler(image_hidden_states)
image_seq_len, image_hidden_size = (perceiver_embeddings.size(1), perceiver_embeddings.size(2))
else:
batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size()
image_hidden_states = perceiver_embeddings
elif perceiver_embeddings is None:
image_seq_len, image_hidden_size = (image_hidden_states.size(1), image_hidden_states.size(2))
else:
raise ValueError('If `perceiver_embeddings` are passed, use_resampler should be True')
image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
text_seq_len = image_attention_mask.size(1)
image_attention_mask = image_attention_mask.unsqueeze(-1)
image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)
if image_hidden_states is not None:
image_batch_size, image_sequence_length, _ = image_hidden_states.size()
image_hidden_shape = (image_batch_size, image_sequence_length)
if image_attention_mask is None:
image_attention_mask = torch.ones(image_hidden_shape, device=device)
image_attention_mask = self.invert_attention_mask(image_attention_mask)
else:
image_attention_mask = None
cross_attention_gate = (image_attention_mask == 0.0).any(dim=-1).to(dtype=self.dtype).squeeze(dim=1).to(device)
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
if idx % self.cross_layer_interval == 0:
cross_attn_block = self.gated_cross_attn_layers[idx // self.cross_layer_interval]
hidden_states = cross_attn_block(hidden_states, causal_mask, image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, past_key_values=None, **kwargs)
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs)
hidden_states = self.norm(hidden_states)
image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size)
return IdeficsBaseModelOutputWithPast(last_hidden_state=hidden_states, image_hidden_states=image_hidden_states, past_key_values=past_key_values)
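A small sketch of how `forward` derives `position_ids` from the attention mask when none are supplied (the cumsum/masked_fill step above); the toy mask is an assumption:

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
# tensor([[0, 1, 2, 1, 1],
#         [0, 1, 2, 3, 4]]) -- padding positions are clamped to 1
```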
|
@auto_docstring
class IdeficsModel(IdeficsPreTrainedModel):
'''
Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`]
Args:
config: IdeficsConfig
'''
def __init__(self, config: IdeficsConfig):
pass
def freeze_relevant_params(self, config=None):
pass
def freeze_text_layers(self, module_exceptions=[]):
pass
def freeze_vision_layers(self, module_exceptions=[]):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_encoder_embeddings: Optional[torch.FloatTensor]=None, perceiver_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, IdeficsBaseModelOutputWithPast]:
'''
image_encoder_embeddings (`torch.FloatTensor`, *optional*):
The output of the image encoder.
perceiver_embeddings (`torch.FloatTensor`, *optional*):
The output of the perceiver resampler.
image_attention_mask (`torch.LongTensor`, *optional*):
The attention mask for the image encoder.
'''
pass
| 9
| 2
| 49
| 5
| 39
| 5
| 6
| 0.16
| 1
| 20
| 12
| 0
| 8
| 14
| 9
| 10
| 471
| 61
| 356
| 111
| 295
| 58
| 172
| 60
| 161
| 37
| 2
| 3
| 63
|
3,045
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsPreTrainedModel
|
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PretrainedConfig, PreTrainedModel
from .perceiver import IdeficsPerceiverResampler
from ...utils.generic import OutputRecorder, check_model_inputs
from .configuration_idefics import IdeficsConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from .vision import IdeficsVisionEmbeddings, IdeficsVisionTransformer
@auto_docstring
class IdeficsPreTrainedModel(PreTrainedModel):
config: IdeficsConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['IdeficsDecoderLayer', 'IdeficsGatedCrossAttentionLayer']
_supports_sdpa = True
_supports_flash_attn = False
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': IdeficsDecoderLayer, 'attentions': OutputRecorder(IdeficsAttention, index=1, layer_name='self_attn')}
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, IdeficsRMSNorm):
module.weight.data.fill_(1.0)
elif isinstance(module, IdeficsVisionEmbeddings):
module.class_embedding.data.normal_()
elif isinstance(module, IdeficsGatedCrossAttentionLayer):
if self.config.alpha_initializer == 'zeros':
module.alpha_cross_attn.data.zero_()
module.alpha_dense.data.zero_()
elif self.config.alpha_initializer == 'ones':
module.alpha_cross_attn.data.fill_(1.0)
module.alpha_dense.data.fill_(1.0)
elif self.config.alpha_initializer in {'normal', 'gaussian', 'random'}:
module.alpha_cross_attn.data.normal_(mean=0.0, std=self.config.alphas_initializer_range)
module.alpha_dense.data.normal_(mean=0.0, std=self.config.alphas_initializer_range)
elif isinstance(module, IdeficsPerceiverResampler):
module.latents.data.normal_()
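`_init_weights` is dispatched per module; a sketch of the equivalent standalone pattern via `nn.Module.apply` (assumed `std`, toy model):

```python
import torch
from torch import nn

std = 0.02                                    # assumed initializer_range
model = nn.Sequential(nn.Linear(4, 4), nn.Embedding(10, 4))

def init_weights(module):
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)

model.apply(init_weights)                     # visits every submodule recursively
```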
|
@auto_docstring
class IdeficsPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 13
| 0
| 10
| 3
| 5
| 0.22
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 22
| 1
| 18
| 10
| 16
| 4
| 17
| 10
| 15
| 5
| 1
| 2
| 5
|
3,046
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/modeling_idefics.py
|
transformers.models.idefics.modeling_idefics.IdeficsRMSNorm
|
from torch import nn
import torch.nn.functional as F
import torch
class IdeficsRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
IdeficsRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
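A numeric sketch of the RMSNorm computation above: scale by the root mean square of the features (no mean subtraction, unlike LayerNorm); the toy tensor is an assumption:

```python
import torch

x = torch.tensor([[3.0, 4.0]])
variance = x.pow(2).mean(-1, keepdim=True)    # (9 + 16) / 2 = 12.5
normed = x * torch.rsqrt(variance + 1e-6)     # x / sqrt(12.5)
print(normed)                                 # ~[[0.8485, 1.1314]]
```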
|
class IdeficsRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
IdeficsRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4
| 1
| 6
| 1
| 4
| 1
| 1
| 0.31
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 21
| 4
| 13
| 7
| 9
| 4
| 13
| 7
| 9
| 2
| 1
| 1
| 4
|
3,047
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/perceiver.py
|
transformers.models.idefics.perceiver.IdeficsMLP
|
from typing import Optional
import torch.nn as nn
from .configuration_idefics import IdeficsConfig
import torch
class IdeficsMLP(nn.Module):
def __init__(self, intermediate_size, config: IdeficsConfig):
"""Simple MLP block with intermediate_size and embedding size"""
super().__init__()
self.embed_dim = config.vision_config.embed_dim
self.ln = nn.LayerNorm(self.embed_dim)
self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False)
self.act = nn.ReLU()
self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False)
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.ln(hidden_states)
hidden_states = self.fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
return hidden_states
|
class IdeficsMLP(nn.Module):
def __init__(self, intermediate_size, config: IdeficsConfig):
'''Simple MLP block with intermediate_size and embedding size'''
pass
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
pass
| 3
| 1
| 8
| 1
| 7
| 1
| 1
| 0.07
| 1
| 2
| 1
| 0
| 2
| 5
| 2
| 12
| 17
| 2
| 14
| 8
| 11
| 1
| 14
| 8
| 11
| 1
| 1
| 0
| 2
|
3,048
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/perceiver.py
|
transformers.models.idefics.perceiver.IdeficsPerceiverAttention
|
import torch
import torch.nn as nn
class IdeficsPerceiverAttention(nn.Module):
def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None:
"""Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
super().__init__()
self.embed_dim, self.n_heads, self.head_dim = (embed_dim, n_heads, head_dim)
self.qk_layer_norms = qk_layer_norms
self.context_layer_norm = nn.LayerNorm(self.embed_dim)
self.latents_layer_norm = nn.LayerNorm(self.embed_dim)
if self.qk_layer_norms:
self.q_layer_norm = nn.LayerNorm(self.head_dim)
self.k_layer_norm = nn.LayerNorm(self.head_dim)
self.qk_scale = self.head_dim ** (-0.5)
self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
self.output_proj = nn.Linear(self.n_heads * self.head_dim, embed_dim, bias=False)
def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
"""
Runs Perceiver cross-attention: the latents attend to the context, with the latents themselves appended to the keys/values along the `seq` dimension.
Args:
context (`torch.Tensor`):
Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
latents (`torch.Tensor`):
Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
Returns:
`torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing the latents after cross-attending
to the context.
"""
context = self.context_layer_norm(context)
latents = self.latents_layer_norm(latents)
batch_size, seq_length, embed_dim = context.shape[:3]
q = self.q_proj(latents)
k = self.k_proj(torch.cat([context, latents], dim=-2))
v = self.v_proj(torch.cat([context, latents], dim=-2))
q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)]
if self.qk_layer_norms:
q = self.q_layer_norm(q)
k = self.k_layer_norm(k)
scores = torch.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k)
stabilized_scores = scores - scores.amax(dim=-1, keepdim=True).detach()
attn = stabilized_scores.softmax(dim=-1)
resampled = torch.einsum('... i j, ... j d -> ... i d', attn, v)
return self.output_proj(resampled.transpose(1, 2).flatten(-2))
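A shape-only sketch of the attention above: queries come from the latents alone, while keys/values span `[context; latents]`; all dims are illustrative assumptions:

```python
import torch

bsz, seq, n_latents, n_heads, head_dim = 2, 100, 64, 4, 16
q = torch.randn(bsz, n_heads, n_latents, head_dim)
k = torch.randn(bsz, n_heads, seq + n_latents, head_dim)
v = torch.randn(bsz, n_heads, seq + n_latents, head_dim)

scores = torch.einsum('... i d, ... j d -> ... i j', q * head_dim ** -0.5, k)
attn = (scores - scores.amax(dim=-1, keepdim=True)).softmax(dim=-1)
out = torch.einsum('... i j, ... j d -> ... i d', attn, v)
assert out.shape == (bsz, n_heads, n_latents, head_dim)
```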
|
class IdeficsPerceiverAttention(nn.Module):
def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None:
'''Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`'''
pass
def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
'''
Runs Perceiver cross-attention: the latents attend to the context, with the latents themselves appended to the keys/values along the `seq` dimension.
Args:
context (`torch.Tensor`):
Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
latents (`torch.Tensor`):
Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
Returns:
`torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing the latents after cross-attending
to the context.
'''
pass
| 3
| 2
| 31
| 5
| 15
| 11
| 2
| 0.68
| 1
| 4
| 0
| 0
| 2
| 13
| 2
| 12
| 63
| 11
| 31
| 22
| 28
| 21
| 31
| 22
| 28
| 2
| 1
| 1
| 4
|
3,049
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/perceiver.py
|
transformers.models.idefics.perceiver.IdeficsPerceiverResampler
|
import torch.nn as nn
from .configuration_idefics import IdeficsConfig
import torch
class IdeficsPerceiverResampler(nn.Module):
def __init__(self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int) -> None:
"""
Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
returns a Tensor of shape [bsz, n_latents, embed_dim]. `embed_dim` is the dimensionality of the embeddings fed
to the Perceiver Resampler (and of the latent embeddings it *returns*); it could be, e.g., the ViT embed_dim, a
ResNet pool dim, and so on.
Args:
config (`IdeficsConfig`): config object
embed_dim (`int`): The size of each embedding vector
depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
head_dim (`int`): Dimensionality of each head projection in the Transformer block.
n_latents (`int`):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
"""
super().__init__()
self.embed_dim, self.n_heads, self.head_dim, self.n_latents = (embed_dim, n_heads, head_dim, n_latents)
self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver
self.latents = nn.Parameter(torch.randn(self.n_latents, self.embed_dim), requires_grad=True)
self.intermediate_dim = self.embed_dim * 4 if not hasattr(config.vision_config, 'embed_dim') else config.vision_config.embed_dim * 4
self.blocks = nn.ModuleList([nn.ModuleList([IdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms), IdeficsMLP(self.intermediate_dim, config)]) for _ in range(depth)])
self.layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, context: torch.Tensor) -> torch.Tensor:
"""Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
latents = self.latents.repeat(context.shape[0], 1, 1)
for attn, ff in self.blocks:
latents = attn(context, latents) + latents
latents = ff(latents) + latents
return self.layer_norm(latents)
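A minimal sketch of the latent bookkeeping in `forward`: the learned latents are broadcast over the batch, so the output length is fixed regardless of the context length; shapes are assumptions:

```python
import torch

n_latents, embed_dim = 64, 1280               # assumed sizes
latents_param = torch.randn(n_latents, embed_dim)
context = torch.randn(2, 257, embed_dim)      # e.g. ViT patch embeddings

latents = latents_param.repeat(context.shape[0], 1, 1)
assert latents.shape == (2, n_latents, embed_dim)   # fixed, independent of 257
```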
|
class IdeficsPerceiverResampler(nn.Module):
def __init__(self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int) -> None:
'''
Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
returns a Tensor of shape [bsz, n_latents, embed_dim]. `embed_dim` is the dimensionality of the embeddings fed
to the Perceiver Resampler (and of the latent embeddings it *returns*); it could be, e.g., the ViT embed_dim, a
ResNet pool dim, and so on.
Args:
config (`IdeficsConfig`): config object
embed_dim (`int`): The size of each embedding vector
depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
head_dim (`int`): Dimensionality of each head projection in the Transformer block.
n_latents (`int`):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
'''
pass
def forward(self, context: torch.Tensor) -> torch.Tensor:
'''Resample arbitrary length context & *compress* down to self.n_latents latent embeddings'''
pass
| 3
| 2
| 28
| 3
| 15
| 10
| 2
| 0.65
| 1
| 7
| 3
| 0
| 2
| 9
| 2
| 12
| 58
| 7
| 31
| 14
| 26
| 20
| 15
| 11
| 12
| 2
| 1
| 1
| 4
|
3,050
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/processing_idefics.py
|
transformers.models.idefics.processing_idefics.IdeficsImagesKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import Callable, Optional, Union
class IdeficsImagesKwargs(ImagesKwargs, total=False):
transform: Optional[Callable]
image_size: Optional[dict[str, int]]
image_mean: Optional[Union[float, list[float]]]
image_std: Optional[Union[float, list[float]]]
|
class IdeficsImagesKwargs(ImagesKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0
| 5
| 1
| 4
| 0
| 5
| 1
| 4
| 0
| 2
| 0
| 0
|
3,051
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/processing_idefics.py
|
transformers.models.idefics.processing_idefics.IdeficsProcessor
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from ...feature_extraction_utils import BatchFeature
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
from ...image_utils import ImageInput
class IdeficsProcessor(ProcessorMixin):
"""
Constructs an IDEFICS processor, which wraps a LLaMA tokenizer and an IDEFICS image processor into a single processor.
[`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`IdeficsImageProcessor`):
An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
tokenizer (`LlamaTokenizerFast`):
An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
image_size (`int`, *optional*, defaults to 224):
Image size (assuming a square image)
add_end_of_utterance_token (`str`, *optional*):
The string representation of the token marking the end of an utterance.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'IdeficsImageProcessor'
tokenizer_class = 'LlamaTokenizerFast'
def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self.image_token_id = tokenizer.image_token_id if hasattr(tokenizer, 'image_token') else tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
self.default_image_dims = (self.image_processor.image_num_channels, self.image_processor.image_size, self.image_processor.image_size)
self.tokenizer_was_trained_with_end_of_utterance_token = '<end_of_utterance>' in self.tokenizer.special_tokens_map.get('additional_special_tokens', [])
@deprecate_kwarg(old_name='prompts', version='5.0.0', new_name='text', raise_if_both_names=True)
def __call__(self, images: Union[ImageInput, list[ImageInput], str, list[str], list[list[str]]]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput], list[list[TextInput]], list[list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[IdeficsProcessorKwargs]) -> BatchFeature:
"""This method takes batched or non-batched prompts made of text and images and converts them into prompts that
the model was trained on and prepares the image pixel values for the model to process.
Args:
images (`Union[ImageInput, list[ImageInput], str, list[str], list[list[str]]]`):
either a single image or a batched list of images - can be passed in when text contains only text prompts,
in order to use the image-text-to-text behavior.
text (`Union[list[TextInput], [list[list[TextInput]]]]`):
either a single prompt or a batched list of prompts - see the detailed description immediately after
the end of the arguments doc section.
return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
The type of tensors to return. Can be one of:
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
Returns:
a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
directly passed to `model.generate`
Detailed explanation:
Each entry in `text` is either a text to be passed as is or an image that will be processed.
An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
When the processor encounters an image it'll inject `<fake_token_around_image><image><fake_token_around_image>`
entry into the prompt.
Example:
```python
checkpoint = "HuggingFaceM4/idefics-9b"
processor = AutoProcessor.from_pretrained(checkpoint)
url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
img = processor.image_processor.fetch_images([url])[0]
prompts = [
"User:",
img,
"Describe this image.
Assistant: An image of two kittens in grass.
",
"User:",
"https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
"Describe this image.
Assistant:",
]
inputs = processor(text=prompts, return_tensors="pt")
generated_ids = model.generate(**inputs, max_length=100)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
In this example the `prompts` will be converted into:
```
<s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
Assistant: An image of two kittens in grass.
User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
Assistant:
```
and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
`pixel_values` dict entry of the return value.
This example also exemplifies that images can be passed as objects or as text urls. It can be seen that the
first image is passed as object and the second one as a url.
To run training, do:
```python
image_transform = transforms.Compose(
[
transforms.RandomResizedCrop(
(w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.ToTensor(),
transforms.Normalize(mean=self.image_mean, std=self.image_std),
]
)
inputs = processor(text=prompts, transform=image_transform, return_tensors="pt")
```
To help debug prompt generation, enable `debug=True`, which will show you what's happening.
"""
if images is None and text is None:
raise ValueError('You need to specify either `text` or `images` and `text`.')
if images is None:
prompts = text
elif text is not None:
if not isinstance(images, (list, tuple)):
images = [images]
if isinstance(text, str):
text = [text]
if isinstance(text, (list, tuple)) and len(text) != len(images):
raise ValueError('When providing both images and text arguments, the number of text prompts should be the same as the number of images. If you want to have several images per prompt, images should be nested as such: images=[[img1, img2], [img3, img4], ...] for text=[prompt1, prompt2, ...].')
if not all((isinstance(i, str) for i in text)):
raise ValueError('When using the image-text-to-text behavior, the prompts should only contain text.')
if isinstance(images[0], (list, tuple)):
prompts = [[sample, *image_list] for image_list, sample in zip(images, text)]
else:
prompts = list(zip(images, text))
output_kwargs = self._merge_kwargs(IdeficsProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
add_eos_token = output_kwargs['text_kwargs'].pop('add_eos_token', False)
add_end_of_utterance_token = output_kwargs['text_kwargs'].pop('add_end_of_utterance_token', None)
if add_end_of_utterance_token is None:
add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
if not any((isinstance(i, (list, tuple)) for i in prompts)):
prompts = [prompts]
fake_token = '<fake_token_around_image>'
image_token = '<image>'
end_of_utterance_token = '<end_of_utterance>'
def image_tokens(last_was_image):
if last_was_image:
return image_token + fake_token
else:
return fake_token + image_token + fake_token
all_prompts = []
all_images = []
for sample in prompts:
full_text = f'{self.tokenizer.bos_token}'
image_objects = []
last_was_image = False
last_was_text = False
for i, item in enumerate(sample):
if i > 0:
last_was_text = bool(not last_was_image)
if isinstance(item, str):
item = item.strip(' ')
if is_url(item):
image = self.image_processor.fetch_images(item)
full_text += image_tokens(last_was_image)
image_objects.append(image)
last_was_image = True
else:
if add_end_of_utterance_token and last_was_text:
full_text += end_of_utterance_token
full_text += item
last_was_image = False
else:
full_text += image_tokens(last_was_image)
image_objects.append(item)
last_was_image = True
if add_eos_token:
full_text += self.tokenizer.eos_token
image_objects = self.image_processor(image_objects, **output_kwargs['images_kwargs'])
all_prompts.append(full_text)
all_images.append(image_objects)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', 'pt')
text_encoding = self.tokenizer(all_prompts, **output_kwargs['text_kwargs'])
all_texts = text_encoding['input_ids']
all_attention_masks = text_encoding['attention_mask']
max_num_images = max((len(x) for x in all_images))
max_num_images = max(1, max_num_images)
at_least_one_image = sum((len(x) for x in all_images)) > 0
output_input_ids = []
output_images = []
output_attention_masks = []
for text_single, attention_mask, extracted_images in zip(all_texts, all_attention_masks, all_images):
padded_input_ids = text_single
image_count = padded_input_ids.count(self.image_token_id)
local_max_num_images = min(image_count, max_num_images)
current_images = extracted_images[:local_max_num_images]
if len(current_images) > 0:
if return_tensors == 'pt':
padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
padded_image_tensor[:current_images.size(0)] = current_images
elif return_tensors == 'pt':
padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
output_images.append(padded_image_tensor)
if return_tensors == 'pt':
output_input_ids.append(torch.tensor(padded_input_ids))
output_attention_masks.append(torch.tensor(attention_mask))
if return_tensors == 'pt':
output_input_ids = torch.stack(output_input_ids)
output_images = torch.stack(output_images)
output_attention_masks = torch.stack(output_attention_masks)
if at_least_one_image:
image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer, return_tensors)
image_attention_mask = incremental_to_binary_attention_mask(image_attention_mask, return_tensors, num_classes=max_num_images)
elif return_tensors == 'pt':
image_attention_mask = torch.zeros(output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool)
return BatchFeature(data={'input_ids': output_input_ids, 'attention_mask': output_attention_masks, 'pixel_values': output_images, 'image_attention_mask': image_attention_mask})
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(tokenizer_input_names + image_processor_input_names + ['image_attention_mask'])
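A sketch of the per-sample image padding step in `__call__` (the `padded_image_tensor` logic above): each sample is zero-padded to `max_num_images` so all samples stack into one tensor; shapes are assumptions:

```python
import torch

max_num_images, dims = 3, (3, 224, 224)       # assumed image dims
current_images = torch.randn(1, *dims)        # this sample has a single image

padded = torch.zeros(max_num_images, *dims)
padded[:current_images.size(0)] = current_images
assert padded.shape == (max_num_images, *dims)
```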
| null | 7
| 2
| 55
| 7
| 32
| 16
| 7
| 0.56
| 1
| 11
| 2
| 0
| 5
| 4
| 5
| 22
| 351
| 50
| 193
| 70
| 170
| 108
| 132
| 54
| 125
| 32
| 2
| 5
| 42
|
3,052
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/processing_idefics.py
|
transformers.models.idefics.processing_idefics.IdeficsProcessorKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class IdeficsProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: IdeficsTextKwargs
images_kwargs: IdeficsImagesKwargs
_defaults = {'text_kwargs': {'add_special_tokens': False, 'padding': 'longest', 'add_eos_token': False}, 'images_kwargs': {}, 'common_kwargs': {'return_tensors': 'pt'}}
|
class IdeficsProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0
| 12
| 2
| 11
| 0
| 4
| 2
| 3
| 0
| 3
| 0
| 0
|
3,053
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/processing_idefics.py
|
transformers.models.idefics.processing_idefics.IdeficsTextKwargs
|
from typing import Callable, Optional, Union
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class IdeficsTextKwargs(TextKwargs, total=False):
add_eos_token: Optional[bool]
add_end_of_utterance_token: Optional[bool]
|
class IdeficsTextKwargs(TextKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 2
| 0
| 0
|
3,054
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionAttention
|
from torch import nn
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from .configuration_idefics import IdeficsVisionConfig
import torch
class IdeficsVisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: IdeficsVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
if self.config._attn_implementation != 'flash_attention_2':
if attention_mask is not None and causal_attention_mask is not None:
attention_mask = attention_mask + causal_attention_mask
elif causal_attention_mask is not None:
attention_mask = causal_attention_mask
else:
self.is_causal = causal_attention_mask is not None
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
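A sketch of the multi-head reshape used in `forward`: `(batch, seq, embed_dim)` to `(batch, heads, seq, head_dim)` and back; the round trip is lossless. Dims are assumptions:

```python
import torch

batch, seq, n_heads, head_dim = 2, 10, 4, 8
embed_dim = n_heads * head_dim
x = torch.randn(batch, seq, embed_dim)

heads = x.view(batch, seq, n_heads, head_dim).transpose(1, 2)
assert heads.shape == (batch, n_heads, seq, head_dim)
merged = heads.transpose(1, 2).reshape(batch, seq, embed_dim)
assert torch.equal(merged, x)
```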
|
class IdeficsVisionAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: IdeficsVisionConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 32
| 5
| 25
| 2
| 4
| 0.11
| 1
| 5
| 0
| 0
| 3
| 10
| 3
| 13
| 102
| 19
| 75
| 30
| 65
| 8
| 54
| 24
| 50
| 8
| 1
| 2
| 11
|
3,055
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionEmbeddings
|
import torch
from .configuration_idefics import IdeficsVisionConfig
from torch import nn
import math
class IdeficsVisionEmbeddings(nn.Module):
def __init__(self, config: IdeficsVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images.
Source:
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
"""
num_patches = embeddings.shape[1] - 1
pos_embed = self.position_embedding(self.position_ids)
num_positions = pos_embed.shape[1] - 1
if num_patches == num_positions and height == width:
return pos_embed
class_pos_embed = pos_embed[:, 0]
patch_pos_embed = pos_embed[:, 1:]
embed_dim = embeddings.shape[-1]
num_h_patches = height // self.config.patch_size
num_w_patches = width // self.config.patch_size
num_h_patches, num_w_patches = (num_h_patches + 0.1, num_w_patches + 0.1)
sqrt_num_positions = math.sqrt(num_positions)
patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
if fp32_upcasting:
logger.warning_once("Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead.")
patch_pos_embed = patch_pos_embed.to(torch.float)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions), mode='bicubic', align_corners=False)
if fp32_upcasting:
patch_pos_embed = patch_pos_embed.to(torch.bfloat16)
if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
raise ValueError(f"Number of patches for images ({(int(num_h_patches), int(num_w_patches))}) don't match the shape of position embedding ({(patch_pos_embed.shape[-2], patch_pos_embed.shape[-1])})")
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if not interpolate_pos_encoding:
if height != self.image_size or width != self.image_size:
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`")
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
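A sketch of the resizing in `interpolate_pos_encoding`: reshape the flat patch positions into a 2D grid, bicubically resize, then flatten back. Grid sizes (16x16 pretrained, 24x24 target) are assumptions:

```python
import math
import torch
from torch import nn

num_positions, embed_dim = 256, 32            # 16x16 pretrained grid (assumed)
patch_pos_embed = torch.randn(1, num_positions, embed_dim)
side = int(math.sqrt(num_positions))

grid = patch_pos_embed.reshape(1, side, side, embed_dim).permute(0, 3, 1, 2)
resized = nn.functional.interpolate(
    grid, scale_factor=(24 / side, 24 / side), mode='bicubic', align_corners=False
)
flat = resized.permute(0, 2, 3, 1).view(1, -1, embed_dim)
assert flat.shape == (1, 24 * 24, embed_dim)
```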
|
class IdeficsVisionEmbeddings(nn.Module):
def __init__(self, config: IdeficsVisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
resolution images.
Source:
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 4
| 1
| 31
| 4
| 24
| 3
| 3
| 0.15
| 1
| 7
| 1
| 0
| 3
| 9
| 3
| 13
| 97
| 13
| 74
| 28
| 70
| 11
| 53
| 28
| 49
| 5
| 1
| 2
| 10
|
3,056
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionEncoder
|
from .configuration_idefics import IdeficsVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Callable, Optional, Union
import torch
from torch import nn
from ...utils import ModelOutput, can_return_tuple, logging
class IdeficsVisionEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`IdeficsVisionEncoderLayer`].
Args:
config: IdeficsVisionConfig
"""
def __init__(self, config: IdeficsVisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class IdeficsVisionEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`IdeficsVisionEncoderLayer`].
Args:
config: IdeficsVisionConfig
'''
def __init__(self, config: IdeficsVisionConfig):
pass
@can_return_tuple
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 4
| 2
| 43
| 5
| 25
| 13
| 7
| 0.61
| 1
| 9
| 3
| 0
| 2
| 3
| 2
| 12
| 95
| 13
| 51
| 19
| 40
| 31
| 27
| 11
| 24
| 12
| 1
| 2
| 13
|
3,057
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionEncoderLayer
|
from .configuration_idefics import IdeficsVisionConfig
from torch import nn
from typing import Callable, Optional, Union
import torch
from ...modeling_layers import GradientCheckpointingLayer
class IdeficsVisionEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: IdeficsVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = IdeficsVisionAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = IdeficsVisionMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
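A minimal sketch of the pre-norm residual pattern this layer uses twice (normalize, transform, add back the untouched residual); the toy sublayer stands in for `self_attn`/`mlp`:

```python
import torch
from torch import nn

embed_dim = 8                                  # assumed toy size
norm = nn.LayerNorm(embed_dim)
sublayer = nn.Linear(embed_dim, embed_dim)     # stand-in for attention or MLP

x = torch.randn(2, 5, embed_dim)
out = x + sublayer(norm(x))                    # pre-norm residual update
assert out.shape == x.shape
```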
|
class IdeficsVisionEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: IdeficsVisionConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 23
| 3
| 16
| 5
| 2
| 0.31
| 1
| 6
| 3
| 0
| 2
| 5
| 2
| 12
| 48
| 6
| 32
| 17
| 23
| 10
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
3,058
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionMLP
|
from ...activations import ACT2FN
import torch
from torch import nn
class IdeficsVisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
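For reference, a quick shape check of the fc1 -> activation -> fc2 path (illustrative values, same module-path assumption as above):

```python
import torch
from transformers.models.idefics.configuration_idefics import IdeficsVisionConfig
from transformers.models.idefics.vision import IdeficsVisionMLP

config = IdeficsVisionConfig(hidden_size=32, intermediate_size=64, hidden_act="gelu")
mlp = IdeficsVisionMLP(config)

x = torch.randn(2, 5, config.hidden_size)
print(mlp(x).shape)  # torch.Size([2, 5, 32]): expands to intermediate_size, then returns to hidden_size
```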
|
class IdeficsVisionMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 13
| 1
| 12
| 7
| 9
| 0
| 12
| 7
| 9
| 1
| 1
| 0
| 2
|
3,059
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionModelOutput
|
from ...utils import ModelOutput, can_return_tuple, logging
from typing import Callable, Optional, Union
import torch
from dataclasses import dataclass
@dataclass
class IdeficsVisionModelOutput(ModelOutput):
"""
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
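As with any `ModelOutput` subclass, fields can be read by attribute or key, and unset fields are dropped from the tuple view; a small sketch:

```python
import torch
from transformers.models.idefics.vision import IdeficsVisionModelOutput

out = IdeficsVisionModelOutput(last_hidden_state=torch.zeros(1, 4, 8))
print(out.last_hidden_state.shape)     # attribute access
print(out["last_hidden_state"].shape)  # dict-style access
print(len(out.to_tuple()))             # 1: unset (None) fields are omitted
```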
|
@dataclass
class IdeficsVisionModelOutput(ModelOutput):
'''
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 4
| 5
| 5
| 4
| 17
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
3,060
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics/vision.py
|
transformers.models.idefics.vision.IdeficsVisionTransformer
|
from .configuration_idefics import IdeficsVisionConfig
from typing import Callable, Optional, Union
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import nn
class IdeficsVisionTransformer(nn.Module):
def __init__(self, config: IdeficsVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = IdeficsVisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = IdeficsVisionEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
Returns:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
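A minimal end-to-end sketch with a deliberately tiny, hypothetical configuration; the sequence length is `1 + (image_size // patch_size) ** 2` because a class token is prepended to the patch embeddings:

```python
import torch
from transformers.models.idefics.configuration_idefics import IdeficsVisionConfig
from transformers.models.idefics.vision import IdeficsVisionTransformer

config = IdeficsVisionConfig(hidden_size=64, intermediate_size=128, num_hidden_layers=2,
                             num_attention_heads=4, image_size=32, patch_size=8)
model = IdeficsVisionTransformer(config)

pixel_values = torch.randn(1, 3, 32, 32)
out = model(pixel_values)
print(out.last_hidden_state.shape)  # torch.Size([1, 17, 64]); 17 = 1 + (32 // 8) ** 2
print(out.pooler_output.shape)      # torch.Size([1, 64]); layer-normed class token
```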
|
class IdeficsVisionTransformer(nn.Module):
def __init__(self, config: IdeficsVisionConfig):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
Returns:
'''
pass
| 3
| 1
| 27
| 4
| 21
| 2
| 4
| 0.09
| 1
| 7
| 4
| 0
| 2
| 5
| 2
| 12
| 56
| 9
| 43
| 20
| 33
| 4
| 24
| 13
| 21
| 6
| 1
| 1
| 7
|
3,061
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/configuration_idefics2.py
|
transformers.models.idefics2.configuration_idefics2.Idefics2Config
|
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class Idefics2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Idefics2Model`]. It is used to instantiate a
Idefics2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the model of the Idefics2
[HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should cache the key/value pairs of the attention mechanism.
image_token_id (`int`, *optional*, defaults to 32001):
The id of the "image" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`Idefics2VisionConfig` or `dict`, *optional*):
            Custom vision config or dict.
        perceiver_config (`Idefics2PerceiverConfig` or `dict`, *optional*):
            Custom perceiver config or dict.
        text_config (`MistralConfig` or `dict`, *optional*):
            Custom text config or dict for the text model.
Example:
```python
>>> from transformers import Idefics2Model, Idefics2Config
>>> # Initializing configuration
>>> configuration = Idefics2Config()
>>> # Initializing a model from the configuration
>>> model = Idefics2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'idefics2'
sub_configs = {'text_config': AutoConfig, 'perceiver_config': Idefics2PerceiverConfig, 'vision_config': Idefics2VisionConfig}
def __init__(self, use_cache=True, image_token_id=32001, tie_word_embeddings=False, vision_config=None, perceiver_config=None, text_config=None, **kwargs):
self.image_token_id = image_token_id
self.use_cache = use_cache
self.tie_word_embeddings = tie_word_embeddings
if perceiver_config is None:
self.perceiver_config = Idefics2PerceiverConfig()
            logger.info('perceiver_config is None, using default perceiver config')
elif isinstance(perceiver_config, dict):
self.perceiver_config = Idefics2PerceiverConfig(**perceiver_config)
elif isinstance(perceiver_config, Idefics2PerceiverConfig):
self.perceiver_config = perceiver_config
if vision_config is None:
self.vision_config = Idefics2VisionConfig()
logger.info('vision_config is None, using default vision config')
elif isinstance(vision_config, dict):
self.vision_config = Idefics2VisionConfig(**vision_config)
elif isinstance(vision_config, Idefics2VisionConfig):
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config['model_type'] = text_config.get('model_type', 'mistral')
text_config = CONFIG_MAPPING[text_config['model_type']](**text_config)
elif text_config is None:
logger.info('text_config is None, using default text config')
text_config = CONFIG_MAPPING['mistral'](max_position_embeddings=4096 * 8, rms_norm_eps=1e-05, pad_token_id=0, tie_word_embeddings=False)
self.text_config = text_config
if self.text_config.hidden_size != self.perceiver_config.hidden_size:
self.perceiver_config.hidden_size = self.text_config.hidden_size
self.perceiver_config.rms_norm_eps = self.text_config.rms_norm_eps
logger.warning_once("Perceiver config has a different `hidden_size` than text config, which means default values were used. In your model's config on the hub, add `hidden_size` and `rms_norm_eps` keys under the `perceiver_config` dict. ")
super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
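Beyond the docstring example, the sub-configs can also be supplied as plain dicts; a sketch of that path and of the `hidden_size` synchronization done at the end of `__init__` (values illustrative):

```python
from transformers import Idefics2Config

config = Idefics2Config(
    vision_config={"hidden_size": 256, "num_hidden_layers": 2, "num_attention_heads": 4},
    perceiver_config={"resampler_depth": 1},
)
print(type(config.vision_config).__name__)  # Idefics2VisionConfig, built from the dict
print(type(config.text_config).__name__)    # MistralConfig, the default text backbone
# __init__ forces the perceiver to track the text model's width:
print(config.perceiver_config.hidden_size == config.text_config.hidden_size)  # True
```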
|
class Idefics2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Idefics2Model`]. It is used to instantiate a
Idefics2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the model of the Idefics2
[HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should cache the key/value pairs of the attention mechanism.
image_token_id (`int`, *optional*, defaults to 32001):
The id of the "image" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`Idefics2VisionConfig` or `dict`, *optional*):
            Custom vision config or dict.
        perceiver_config (`Idefics2PerceiverConfig` or `dict`, *optional*):
            Custom perceiver config or dict.
        text_config (`MistralConfig` or `dict`, *optional*):
            Custom text config or dict for the text model.
Example:
```python
>>> from transformers import Idefics2Model, Idefics2Config
>>> # Initializing configuration
>>> configuration = Idefics2Config()
>>> # Initializing a model from the configuration
>>> model = Idefics2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, use_cache=True, image_token_id=32001, tie_word_embeddings=False, vision_config=None, perceiver_config=None, text_config=None, **kwargs):
pass
| 2
| 1
| 53
| 5
| 47
| 1
| 11
| 0.57
| 1
| 4
| 2
| 0
| 1
| 6
| 1
| 1
| 95
| 10
| 54
| 19
| 43
| 31
| 28
| 10
| 26
| 11
| 1
| 1
| 11
|
3,062
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/configuration_idefics2.py
|
transformers.models.idefics2.configuration_idefics2.Idefics2PerceiverConfig
|
from ...configuration_utils import PretrainedConfig
class Idefics2PerceiverConfig(PretrainedConfig):
"""
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the perceiver block.
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
resampler_n_latents (`int`, *optional*, defaults to 64):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
resampler_depth (`int`, *optional*, defaults to 3):
Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (<= 3).
resampler_n_heads (`int`, *optional*, defaults to 16):
Number of heads in each Transformer block (for multi-headed self-attention).
resampler_head_dim (`int`, *optional*, defaults to 96):
Dimensionality of each head projection in the Transformer block.
num_key_value_heads (`int`, *optional*, defaults to 4):
Number of key-value heads in the perceiver attention block.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation for initializing all weight matrices in the model.
"""
model_type = 'idefics2_perceiver'
def __init__(self, hidden_act='silu', hidden_size=4096, rms_norm_eps=1e-06, resampler_n_latents=64, resampler_depth=3, resampler_n_heads=16, resampler_head_dim=96, num_key_value_heads=4, attention_dropout=0.0, initializer_range=0.02, **kwargs):
self.hidden_act = hidden_act
self.hidden_size = hidden_size
self.rms_norm_eps = rms_norm_eps
self.resampler_n_latents = resampler_n_latents
self.resampler_depth = resampler_depth
self.resampler_n_heads = resampler_n_heads
self.num_key_value_heads = num_key_value_heads
self.resampler_head_dim = resampler_head_dim
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
if self.num_key_value_heads > self.resampler_n_heads:
raise ValueError(f'num_key_value_heads={self.num_key_value_heads} must be less than or equal to resampler_n_heads={self.resampler_n_heads}')
super().__init__(**kwargs)
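The constructor's only validation is the head-count check at the end; a sketch of the passing and failing paths:

```python
from transformers.models.idefics2.configuration_idefics2 import Idefics2PerceiverConfig

ok = Idefics2PerceiverConfig(resampler_n_heads=16, num_key_value_heads=4)
print(ok.resampler_n_latents)  # 64 (default)

try:
    Idefics2PerceiverConfig(resampler_n_heads=4, num_key_value_heads=8)
except ValueError as err:
    print(err)  # num_key_value_heads=8 must be less than or equal to resampler_n_heads=4
```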
|
class Idefics2PerceiverConfig(PretrainedConfig):
'''
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the perceiver block.
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
resampler_n_latents (`int`, *optional*, defaults to 64):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
resampler_depth (`int`, *optional*, defaults to 3):
Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (<= 3).
resampler_n_heads (`int`, *optional*, defaults to 16):
Number of heads in each Transformer block (for multi-headed self-attention).
resampler_head_dim (`int`, *optional*, defaults to 96):
Dimensionality of each head projection in the Transformer block.
num_key_value_heads (`int`, *optional*, defaults to 4):
Number of key-value heads in the perceiver attention block.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation for initializing all weight matrices in the model.
'''
def __init__(self, hidden_act='silu', hidden_size=4096, rms_norm_eps=1e-06, resampler_n_latents=64, resampler_depth=3, resampler_n_heads=16, resampler_head_dim=96, num_key_value_heads=4, attention_dropout=0.0, initializer_range=0.02, **kwargs):
pass
| 2
| 1
| 28
| 0
| 28
| 0
| 2
| 0.77
| 1
| 2
| 0
| 0
| 1
| 9
| 1
| 1
| 56
| 3
| 30
| 24
| 16
| 23
| 15
| 12
| 13
| 2
| 1
| 1
| 2
|
3,063
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/configuration_idefics2.py
|
transformers.models.idefics2.configuration_idefics2.Idefics2VisionConfig
|
from ...configuration_utils import PretrainedConfig
class Idefics2VisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Idefics2VisionModel`]. It is used to instantiate a
Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model
[HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation for initializing all weight matrices in the model.
Example:
```python
>>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
>>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
>>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration
>>> configuration = Idefics2VisionConfig()
>>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
>>> model = Idefics2VisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'idefics2_vision'
base_config_key = 'vision_config'
def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='gelu_pytorch_tanh', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
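Two derived facts worth checking when sizing this encoder: the patch grid fixes the sequence length, and the config round-trips through a dict via the standard `PretrainedConfig` helpers (sketch, values illustrative):

```python
from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig

config = Idefics2VisionConfig(image_size=378, patch_size=14)
print((config.image_size // config.patch_size) ** 2)  # 729 patches per image

clone = Idefics2VisionConfig.from_dict(config.to_dict())
print(clone.patch_size == config.patch_size)  # True
```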
|
class Idefics2VisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Idefics2VisionModel`]. It is used to instantiate a
Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model
[HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation for initializing all weight matrices in the model.
Example:
```python
>>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
>>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
>>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration
>>> configuration = Idefics2VisionConfig()
>>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
>>> model = Idefics2VisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='gelu_pytorch_tanh', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=0.02, **kwargs):
pass
| 2
| 1
| 28
| 1
| 27
| 0
| 1
| 1.43
| 1
| 1
| 0
| 0
| 1
| 11
| 1
| 1
| 83
| 10
| 30
| 29
| 14
| 43
| 16
| 15
| 14
| 1
| 1
| 0
| 1
|
3,064
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/image_processing_idefics2.py
|
transformers.models.idefics2.image_processing_idefics2.Idefics2ImageProcessor
|
import numpy as np
from typing import Any, Optional, Union
from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_nested_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils import TensorType, is_vision_available, logging
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from collections.abc import Iterable
class Idefics2ImageProcessor(BaseImageProcessor):
"""
    Constructs an Idefics2 image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
shortest edge resized to keep the input aspect ratio, with a minimum size of `size["shortest_edge"]`.
size (`Dict`, *optional*):
Controls the size of the output image. This is a dictionary containing the keys "shortest_edge" and "longest_edge".
resample (`Resampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
a standard deviation of `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch and number of images per
sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
do_image_splitting (`bool`, *optional*, defaults to `False`):
            Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
strategy was first introduced in https://huggingface.co/papers/2311.06607.
"""
model_input_names = ['pixel_values', 'pixel_attention_mask']
def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=True, do_image_splitting: bool=False, **kwargs) -> None:
super().__init__(**kwargs)
self.do_convert_rgb = do_convert_rgb
self.do_resize = do_resize
self.size = size if size is not None else {'shortest_edge': 378, 'longest_edge': 980}
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.do_image_splitting = do_image_splitting
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if 'shortest_edge' in size and 'longest_edge' in size:
size = get_resize_output_image_size(image, size, input_data_format)
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError("size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'.")
return resize(image, size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
        Pads a batch of samples: each image is padded at the bottom and right with zeros up to the largest height and width in the batch,
        and each sample is padded with empty images up to the largest number of images per sample in the batch. Optionally returns a pixel mask.
Args:
images (`np.ndarray`):
List of list of images to pad. Pads to the largest height and width in the batch.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
pad_size = get_max_height_width(images, input_data_format=input_data_format)
batch_size = len(images)
max_num_images = max((len(images_) for images_ in images))
input_data_format = infer_channel_dimension_format(images[0][0]) if input_data_format is None else input_data_format
data_format = input_data_format if data_format is None else data_format
def empty_image(size, input_data_format):
if input_data_format == ChannelDimension.FIRST:
return np.zeros((3, *size), dtype=np.uint8)
elif input_data_format == ChannelDimension.LAST:
return np.zeros((*size, 3), dtype=np.uint8)
raise ValueError('Invalid channel dimension format.')
padded_images_list = [[empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size)]
padded_masks = [[np.zeros(pad_size) for _ in range(max_num_images)] for _ in range(batch_size)]
for batch_idx in range(batch_size):
for sample_idx, image in enumerate(images[batch_idx]):
padded_images_list[batch_idx][sample_idx] = self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
padded_masks[batch_idx][sample_idx] = make_pixel_mask(image, output_size=pad_size, input_data_format=input_data_format)
padded_masks = padded_masks if return_pixel_mask else None
return (padded_images_list, padded_masks)
def _crop(self, im: np.ndarray, w1: int, h1: int, w2: int, h2: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
if input_data_format == ChannelDimension.FIRST:
return im[:, h1:h2, w1:w2]
elif input_data_format == ChannelDimension.LAST:
return im[h1:h2, w1:w2, :]
def split_image(self, image: np.ndarray, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
        Split an image into 4 equal sub-images, and then concatenate that sequence with the original image.
That means that a single image becomes a sequence of 5 images.
This is a "trick" to spend more compute on each image with no changes in the vision encoder.
Args:
image (`np.ndarray`):
Images to split.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
height, width = get_image_size(image, input_data_format)
mid_width = width // 2
mid_height = height // 2
return [self._crop(image, 0, 0, mid_width, mid_height, input_data_format), self._crop(image, mid_width, 0, width, mid_height, input_data_format), self._crop(image, 0, mid_height, mid_width, height, input_data_format), self._crop(image, mid_width, mid_height, width, height, input_data_format), image]
def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_image_splitting: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[ChannelDimension]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST):
"""
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether or not to pad the images to the largest height and width in the batch.
do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
                Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
strategy was first introduced in https://huggingface.co/papers/2311.06607.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting
images = self.fetch_images(images)
images_list = make_nested_list_of_images(images)
if not valid_images(images_list[0]):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if do_convert_rgb:
images_list = [[convert_to_rgb(image) for image in images] for images in images_list]
images_list = [[to_numpy_array(image) for image in images] for images in images_list]
first_image_in_list = [images for images in images_list if images][0][0]
if do_rescale and is_scaled_image(first_image_in_list):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(first_image_in_list)
if do_image_splitting:
new_images_list = []
for images in images_list:
new_images = []
for image in images:
new_images.extend(self.split_image(image, input_data_format))
new_images_list.append(new_images)
images_list = new_images_list
if do_resize:
images_list = [[self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] for images in images_list]
if do_rescale:
images_list = [[self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] for images in images_list]
if do_normalize:
images_list = [[self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] for images in images_list]
pixel_attention_mask = None
if do_pad:
images_list, pixel_attention_mask = self.pad(images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format)
if data_format is not None:
images_list = [[to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] for images in images_list]
data = {'pixel_values': np.array(images_list) if do_pad else images_list}
if pixel_attention_mask is not None:
data['pixel_attention_mask'] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask
return BatchFeature(data=data, tensor_type=return_tensors)
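A hedged end-to-end sketch of `preprocess` on an uneven batch (two samples with different image counts and sizes), assuming in-memory arrays pass straight through the image-fetching step; with `do_pad=True` both the spatial dimensions and the per-sample image count are padded, which is what makes the stacked output rectangular:

```python
import numpy as np
from transformers.models.idefics2.image_processing_idefics2 import Idefics2ImageProcessor

processor = Idefics2ImageProcessor()
sample_a = [np.random.randint(0, 256, (60, 80, 3), dtype=np.uint8)]
sample_b = [np.random.randint(0, 256, (48, 64, 3), dtype=np.uint8),
            np.random.randint(0, 256, (64, 48, 3), dtype=np.uint8)]

batch = processor.preprocess([sample_a, sample_b], return_tensors="np")
print(batch["pixel_values"].shape)          # (2, 2, 3, max_height, max_width)
print(batch["pixel_attention_mask"].shape)  # (2, 2, max_height, max_width)
```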
|
class Idefics2ImageProcessor(BaseImageProcessor):
'''
    Constructs an Idefics2 image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
shortest edge resized to keep the input aspect ratio, with a minimum size of `size["shortest_edge"]`.
size (`Dict`, *optional*):
Controls the size of the output image. This is a dictionary containing the keys "shortest_edge" and "longest_edge".
resample (`Resampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
a standard deviation of `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch and number of images per
sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
do_image_splitting (`bool`, *optional*, defaults to `False`):
            Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
strategy was first introduced in https://huggingface.co/papers/2311.06607.
'''
def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=True, do_image_splitting: bool=False, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image with zeros to the given size.
'''
pass
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
        Pads a batch of samples: each image is padded at the bottom and right with zeros up to the largest height and width in the batch,
        and each sample is padded with empty images up to the largest number of images per sample in the batch. Optionally returns a pixel mask.
Args:
images (`np.ndarray`):
List of list of images to pad. Pads to the largest height and width in the batch.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def empty_image(size, input_data_format):
pass
def _crop(self, im: np.ndarray, w1: int, h1: int, w2: int, h2: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
pass
def split_image(self, image: np.ndarray, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
        Split an image into 4 equal sub-images, and then concatenate that sequence with the original image.
That means that a single image becomes a sequence of 5 images.
This is a "trick" to spend more compute on each image with no changes in the vision encoder.
Args:
image (`np.ndarray`):
Images to split.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_image_splitting: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[ChannelDimension]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST):
'''
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether or not to pad the images to the largest height and width in the batch.
do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
                Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
strategy was first introduced in https://huggingface.co/papers/2311.06607.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 9
| 6
| 47
| 3
| 31
| 13
| 6
| 0.57
| 1
| 11
| 3
| 0
| 7
| 11
| 7
| 27
| 416
| 35
| 243
| 109
| 168
| 139
| 103
| 42
| 94
| 27
| 3
| 3
| 48
|
3,065
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2BaseModelOutputWithPast
|
from ...cache_utils import Cache, DynamicCache
from dataclasses import dataclass
from ...modeling_outputs import BaseModelOutput, ModelOutput
import torch
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@dataclass
@auto_docstring(custom_intro="\n Base class for Idefics2 model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class Idefics2BaseModelOutputWithPast(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver resampler.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Base class for Idefics2 model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class Idefics2BaseModelOutputWithPast(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver resampler.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4.83
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 1
| 6
| 6
| 5
| 29
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
3,066
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2CausalLMOutputWithPast
|
import torch
from dataclasses import dataclass
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache
from ...modeling_outputs import BaseModelOutput, ModelOutput
@dataclass
@auto_docstring(custom_intro='\n Base class for Idefics2 causal language model (or autoregressive) outputs.\n ')
class Idefics2CausalLMOutputWithPast(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver resampler.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for Idefics2 causal language model (or autoregressive) outputs.\n ')
class Idefics2CausalLMOutputWithPast(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings), of shape `(batch_size, num_images,
sequence_length, hidden_size)`.
Image hidden states of the model, produced by the vision encoder and optionally by the perceiver.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.71
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 1
| 7
| 7
| 6
| 26
| 7
| 7
| 6
| 0
| 1
| 0
| 0
|
3,067
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2Connector
|
from torch import nn
class Idefics2Connector(nn.Module):
def __init__(self, config):
super().__init__()
self.modality_projection = Idefics2MLP(hidden_size=config.vision_config.hidden_size, intermediate_size=config.text_config.intermediate_size, output_size=config.text_config.hidden_size, hidden_act=config.text_config.hidden_act)
self.perceiver_resampler = Idefics2PerceiverResampler._from_config(config.perceiver_config)
def forward(self, image_hidden_states, attention_mask):
image_hidden_states = self.modality_projection(image_hidden_states)
image_hidden_states = self.perceiver_resampler(context=image_hidden_states, attention_mask=attention_mask)
return image_hidden_states
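As a shape-level illustration of what the connector accomplishes, here is a minimal, self-contained sketch. The sizes are made up, the `nn.Linear` stands in for the gated `Idefics2MLP`, and the average pool stands in for the perceiver resampler; it only demonstrates the project-then-resample flow, not the real modules:
```python
import torch
from torch import nn

# Made-up sizes; Linear stands in for the gated MLP projection, AdaptiveAvgPool1d for the resampler.
vision_hidden, text_hidden, n_latents = 32, 64, 8
modality_projection = nn.Linear(vision_hidden, text_hidden, bias=False)
resampler_stub = nn.AdaptiveAvgPool1d(n_latents)

image_hidden_states = torch.randn(2, 196, vision_hidden)               # (batch, patches, vision_hidden)
projected = modality_projection(image_hidden_states)                   # (2, 196, text_hidden)
resampled = resampler_stub(projected.transpose(1, 2)).transpose(1, 2)  # (2, n_latents, text_hidden)
print(resampled.shape)  # torch.Size([2, 8, 64]) -- fixed-length sequence handed to the language model
```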
|
class Idefics2Connector(nn.Module):
def __init__(self, config):
pass
def forward(self, image_hidden_states, attention_mask):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 3
| 2
| 0
| 2
| 2
| 2
| 12
| 15
| 1
| 14
| 5
| 11
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
3,068
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2Encoder
|
from ...modeling_outputs import BaseModelOutput, ModelOutput
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
import torch
from torch import nn
from typing import Callable, Optional, Union
class Idefics2Encoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Idefics2EncoderLayer`].
Args:
config: Idefics2Config
"""
def __init__(self, config: Idefics2Config):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutput:
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(hidden_states, attention_mask, **kwargs)
return BaseModelOutput(last_hidden_state=hidden_states)
|
class Idefics2Encoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Idefics2EncoderLayer`].
Args:
config: Idefics2Config
'''
def __init__(self, config: Idefics2Config):
pass
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutput:
pass
| 4
| 1
| 38
| 4
| 24
| 10
| 7
| 0.56
| 1
| 8
| 3
| 0
| 2
| 3
| 2
| 12
| 86
| 11
| 48
| 18
| 38
| 27
| 27
| 11
| 24
| 12
| 1
| 2
| 13
|
3,069
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2EncoderLayer
|
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
import torch
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
class Idefics2EncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Idefics2VisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = Idefics2VisionAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = Idefics2VisionMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
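The layer follows the standard pre-norm transformer pattern: normalize, run the sublayer, then add the result back onto the residual, once for attention and once for the MLP. A self-contained sketch of that pattern with placeholder sublayers (the `nn.Linear` and `nn.Sequential` below are stand-ins, not the real attention or vision MLP):
```python
import torch
from torch import nn

def prenorm_residual(x: torch.Tensor, norm: nn.Module, sublayer) -> torch.Tensor:
    # pre-norm block: x -> x + sublayer(norm(x))
    return x + sublayer(norm(x))

hidden = torch.randn(2, 5, 16)
norm1, norm2 = nn.LayerNorm(16), nn.LayerNorm(16)
attn_stub = nn.Linear(16, 16)                                              # stand-in for self-attention
mlp_stub = nn.Sequential(nn.Linear(16, 32), nn.GELU(), nn.Linear(32, 16))  # stand-in for the MLP

hidden = prenorm_residual(hidden, norm1, attn_stub)
hidden = prenorm_residual(hidden, norm2, mlp_stub)
print(hidden.shape)  # torch.Size([2, 5, 16]) -- shape is preserved through both residual blocks
```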
|
class Idefics2EncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Idefics2VisionConfig):
pass
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
| 4
| 0
| 22
| 3
| 15
| 5
| 2
| 0.37
| 1
| 5
| 2
| 0
| 2
| 5
| 2
| 12
| 47
| 6
| 30
| 16
| 22
| 11
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
3,070
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration
|
from ...generation import GenerationMixin
import torch
from ...cache_utils import Cache, DynamicCache
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The Idefics2 Model with a language modeling head. It is made up of a SigLIP vision encoder, with a language modeling head on top.\n ')
class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.model = Idefics2Model(config)
self.image_token_id = self.config.image_token_id
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.vocab_size = config.text_config.vocab_size
self.post_init()
def enable_input_require_grads(self):
"""
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
the model weights fixed.
"""
def make_inputs_require_grads(module, input, output):
output.requires_grad_(True)
self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
def disable_input_require_grads(self):
self._text_require_grads_hook.remove()
self._vision_require_grads_hook.remove()
def get_input_embeddings(self):
return self.model.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.text_model.set_input_embeddings(value)
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
return self.model.get_image_features(pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Idefics2CausalLMOutputWithPast]:
"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection and perceiver resampling.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics2ForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModelForVision2Seq
>>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base")
>>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b-base", device_map="auto")
>>> BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
>>> EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
>>> # Create inputs
>>> prompts = [
... "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
... "In which city is that bridge located?<image>",
... ]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(images=images, text=prompts, padding=True, return_tensors="pt").to("cuda")
>>> # Generate
>>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts)
['In this image, we can see the city of New York, and more specifically the Statue of Liberty. In this image, we can see the city of New York, and more specifically the Statue of Liberty.\\n\\n', 'In which city is that bridge located?\\n\\nThe bridge is located in the city of Pittsburgh, Pennsylvania.\\n\\n\\nThe bridge is']
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=True, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return Idefics2CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, pixel_values=None, pixel_attention_mask=None, image_hidden_states=None, logits_to_keep=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, logits_to_keep=logits_to_keep, **kwargs)
if image_hidden_states is not None or cache_position[0] != 0:
model_inputs['pixel_values'] = None
model_inputs['pixel_attention_mask'] = None
return model_inputs
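One detail in `forward` worth spelling out is the `logits_to_keep` slicing: the large vocabulary projection is only applied to the positions that are actually needed (often just the last token during generation). A small sketch of the slicing rule used above, with toy tensors:
```python
import torch

hidden_states = torch.randn(2, 6, 4)  # (batch, seq, hidden)

# An int keeps only the last N positions; 0 keeps everything because slice(0, None) is the full range.
for logits_to_keep in (0, 1, 3):
    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    print(logits_to_keep, hidden_states[:, slice_indices, :].shape[1])  # keeps 6, 1, 3 positions

# A tensor of indices can be passed instead to keep arbitrary positions.
keep = torch.tensor([0, 5])
print(hidden_states[:, keep, :].shape[1])  # 2
```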
|
@auto_docstring(custom_intro='\n The Idefics2 Model with a language modeling head. It is made up of a SigLIP vision encoder, with a language modeling head on top.\n ')
class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def enable_input_require_grads(self):
'''
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
the model weights fixed.
'''
pass
def make_inputs_require_grads(module, input, output):
pass
def disable_input_require_grads(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Idefics2CausalLMOutputWithPast]:
'''
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection and perceiver resampling.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics2ForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModelForVision2Seq
>>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base")
>>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b-base", device_map="auto")
>>> BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
>>> EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
>>> # Create inputs
>>> prompts = [
... "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
... "In which city is that bridge located?<image>",
... ]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(images=images, text=prompts, padding=True, return_tensors="pt").to("cuda")
>>> # Generate
>>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts)
['In this image, we can see the city of New York, and more specifically the Statue of Liberty. In this image, we can see the city of New York, and more specifically the Statue of Liberty.\n\n', 'In which city is that bridge located?\n\nThe bridge is located in the city of Pittsburgh, Pennsylvania.\n\n\nThe bridge is']
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, pixel_values=None, pixel_attention_mask=None, image_hidden_states=None, logits_to_keep=None, **kwargs):
pass
| 13
| 2
| 20
| 2
| 13
| 5
| 2
| 0.41
| 2
| 8
| 2
| 0
| 10
| 6
| 11
| 12
| 259
| 37
| 158
| 64
| 113
| 65
| 81
| 34
| 68
| 9
| 2
| 2
| 29
|
3,071
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2MLP
|
from torch import nn
from ...activations import ACT2FN
class Idefics2MLP(nn.Module):
def __init__(self, hidden_size: int, intermediate_size: int, output_size: int, hidden_act: str):
super().__init__()
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
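This is the gated (SwiGLU-style) MLP: the up projection is modulated elementwise by an activated gate projection before being mapped down to the output size. A self-contained restatement with illustrative sizes; `nn.SiLU` is only a stand-in for whatever activation `ACT2FN[hidden_act]` resolves to:
```python
import torch
from torch import nn

hidden_size, intermediate_size, output_size = 16, 32, 24
gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, output_size, bias=False)
act_fn = nn.SiLU()  # stand-in for ACT2FN[hidden_act]

x = torch.randn(2, 5, hidden_size)
out = down_proj(act_fn(gate_proj(x)) * up_proj(x))  # gate modulates the up projection elementwise
print(out.shape)  # torch.Size([2, 5, 24])
```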
|
class Idefics2MLP(nn.Module):
def __init__(self, hidden_size: int, intermediate_size: int, output_size: int, hidden_act: str):
pass
def forward(self, x):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 4
| 2
| 12
| 16
| 1
| 15
| 13
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
3,072
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2Model
|
from ...processing_utils import Unpack
from ..auto import AutoModel
from ...cache_utils import Cache, DynamicCache
import torch
from typing import Callable, Optional, Union
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_flash_attention_utils import FlashAttentionKwargs
@auto_docstring(custom_intro='\n Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder\n ')
class Idefics2Model(Idefics2PreTrainedModel):
def __init__(self, config: Idefics2Config):
super().__init__(config)
self.padding_idx = self.config.text_config.pad_token_id
self.vocab_size = self.config.text_config.vocab_size
self.vision_model = Idefics2VisionTransformer._from_config(config.vision_config)
self.connector = Idefics2Connector(config)
self.text_model = AutoModel.from_config(config.text_config)
self.image_seq_len = config.perceiver_config.resampler_n_latents
self.image_token_id = self.config.image_token_id
self.post_init()
def enable_input_require_grads(self):
"""
Enables the gradients for the input embeddings.
This is useful for LoRA when using gradient checkpointing.
c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
"""
def get_lowest_module(module):
if len(list(module.children())) == 0:
return module
else:
return get_lowest_module(list(module.children())[0])
def make_inputs_require_grads(module, input, output):
output.requires_grad_(True)
self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(make_inputs_require_grads)
def disable_input_require_grads(self):
self._text_require_grads_hook.remove()
self._vision_require_grads_hook.remove()
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
def inputs_merger(self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor]):
"""
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
The merging happens as follows:
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
- We get the image hidden states for the image through the vision encoder (and potentially the perceiver), and that hidden state is then projected into the text embedding space.
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
- The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
- To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device))
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
image_hidden_states = image_hidden_states.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_hidden_states)
return inputs_embeds
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image.
"""
batch_size, num_images, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.to(dtype=self.dtype)
pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
nb_values_per_image = pixel_values.shape[1:].numel()
real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
pixel_values = pixel_values[real_images_inds].contiguous()
if pixel_attention_mask is None:
pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device)
else:
pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
patch_size = self.config.vision_config.patch_size
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) == patch_size * patch_size).bool()
image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
image_hidden_states = image_hidden_states.last_hidden_state
image_hidden_states = self.connector(image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1))
image_hidden_states = image_hidden_states.view(-1, image_hidden_states.shape[-1])
return image_hidden_states
@can_return_tuple
@auto_docstring(custom_intro="\n Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to\n the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where\n max_num_images is the maximum number of images among the batch_size samples in the batch.\n\n Padding images are not needed beyond padding the pixel_values at the entrance of the model.\n For efficiency, we only pass through the vision_model's forward the real images by\n discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where\n image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.\n ")
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, Idefics2BaseModelOutputWithPast]:
"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection and perceiver resampling.
"""
if self.training and self.text_model.gradient_checkpointing and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.text_model.get_input_embeddings()(input_ids)
if pixel_values is not None and image_hidden_states is not None:
raise ValueError('You cannot specify both pixel_values and image_hidden_states at the same time')
elif pixel_values is not None:
image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
elif image_hidden_states is not None:
image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
if image_hidden_states is not None:
inputs_embeds = self.inputs_merger(input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states)
kwargs['return_dict'] = True
outputs = self.text_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs)
return Idefics2BaseModelOutputWithPast(last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_hidden_states)
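The core of `inputs_merger` is a boolean mask over the image-token positions plus a `masked_scatter` that writes the projected image features into exactly those slots. A toy, self-contained illustration; the token id `4`, the tiny hidden size, and the zero text embeddings are made up for the example:
```python
import torch

image_token_id = 4                                      # illustrative id, not the real config value
input_ids = torch.tensor([[7, 4, 4, 9]])                # text, <image>, <image>, text
inputs_embeds = torch.zeros(1, 4, 3)                    # (batch, seq, hidden) placeholder text embeddings
image_hidden_states = torch.arange(6.0).reshape(2, 3)   # two image "patches" worth of features

special_image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
merged = inputs_embeds.masked_scatter(special_image_mask, image_hidden_states)
print(merged)  # sequence positions 1 and 2 now hold the image features, the text rows stay untouched
```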
|
@auto_docstring(custom_intro='\n Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder\n ')
class Idefics2Model(Idefics2PreTrainedModel):
def __init__(self, config: Idefics2Config):
pass
def enable_input_require_grads(self):
'''
Enables the gradients for the input embeddings.
This is useful for LoRA when using gradient checkpointing.
c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
'''
pass
def get_lowest_module(module):
pass
def make_inputs_require_grads(module, input, output):
pass
def disable_input_require_grads(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def inputs_merger(self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor]):
'''
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
The merging happens as follows:
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
- We get the image hidden states for the image through the vision encoder (and potentially the perceiver), and that hidden state is then projected into the text embedding space.
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
- The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
- To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
'''
pass
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
'''
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image.
'''
pass
@can_return_tuple
@auto_docstring(custom_intro="\n Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to\n the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where\n max_num_images is the maximum number of images among the batch_size samples in the batch.\n\n Padding images are not needed beyond padding the pixel_values at the entrance of the model.\n For efficiency, we only pass through the vision_model's forward the real images by\n discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where\n image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.\n ")
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, Idefics2BaseModelOutputWithPast]:
'''
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection and perceiver resampling.
'''
pass
| 14
| 4
| 24
| 3
| 18
| 3
| 3
| 0.17
| 1
| 13
| 7
| 0
| 7
| 11
| 7
| 8
| 226
| 33
| 166
| 67
| 125
| 29
| 88
| 35
| 78
| 20
| 2
| 3
| 29
|
3,073
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2MultiheadAttentionPoolingHead
|
from torch import nn
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
import torch
class Idefics2MultiheadAttentionPoolingHead(nn.Module):
"""Multihead Attention Pooling."""
def __init__(self, config: Idefics2VisionConfig):
super().__init__()
self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = Idefics2MLP(hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, output_size=config.hidden_size)
def forward(self, hidden_state):
batch_size = hidden_state.shape[0]
probe = self.probe.repeat(batch_size, 1, 1)
hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
residual = hidden_state
hidden_state = self.layernorm(hidden_state)
hidden_state = residual + self.mlp(hidden_state)
return hidden_state[:, 0]
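The pooling head collapses a variable-length patch sequence into a single vector by letting one learned probe query attend over every position. A minimal sketch of that attention-pooling step; the layer norm and MLP residual of the real head are left out:
```python
import torch
from torch import nn

hidden_size, num_heads = 16, 4
probe = nn.Parameter(torch.randn(1, 1, hidden_size))          # one learned query
attention = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)

hidden_state = torch.randn(2, 10, hidden_size)                # (batch, patches, hidden)
queries = probe.repeat(hidden_state.shape[0], 1, 1)           # broadcast the probe over the batch
pooled = attention(queries, hidden_state, hidden_state)[0]    # (batch, 1, hidden)
print(pooled[:, 0].shape)  # torch.Size([2, 16]) -- one pooled vector per image
```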
|
class Idefics2MultiheadAttentionPoolingHead(nn.Module):
'''Multihead Attention Pooling.'''
def __init__(self, config: Idefics2VisionConfig):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 12
| 2
| 10
| 1
| 1
| 0.1
| 1
| 3
| 2
| 0
| 2
| 4
| 2
| 12
| 28
| 6
| 20
| 10
| 17
| 2
| 15
| 10
| 12
| 1
| 1
| 0
| 2
|
3,074
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2PerceiverAttention
|
from typing import Callable, Optional, Union
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...cache_utils import Cache, DynamicCache
class Idefics2PerceiverAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int]=None) -> None:
"""Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
super().__init__()
self.config = config
self.layer_idx = None
self.hidden_size = config.hidden_size
self.num_heads = config.resampler_n_heads
self.head_dim = config.resampler_head_dim
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.attention_dropout = config.attention_dropout
self.scaling = self.head_dim ** (-0.5)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = False
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
Args:
latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to.
context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample.
attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask.
position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token.
past_key_values (`Cache`, *optional*): Tuple of tensors containing cached key and value states.
output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights.
use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_values for caching.
"""
bsz, q_len, _ = latents.size()
kv_seq_len = q_len + context.size()[1]
hidden_states = torch.concat([context, latents], dim=-2)
queries = self.q_proj(latents)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
values = values.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
past_key_values = getattr(self, 'past_key_values', past_key_values)
if past_key_values is not None:
keys, values = past_key_values.update(keys, values, self.layer_idx)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, **kwargs)
attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim)
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
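What makes this attention "perceiver-style" is the asymmetry of its inputs: queries come from the short, fixed-length latents while keys and values come from the long context concatenated with those latents, so the output always has `n_latents` positions regardless of the image sequence length. A single-head, projection-free sketch of just that shape logic (learned projections, multiple heads, grouped KV and masking are all omitted):
```python
import torch
import torch.nn.functional as F

bsz, n_latents, seq_len, dim = 2, 4, 11, 16
latents = torch.randn(bsz, n_latents, dim)                  # fixed-length queries
context = torch.randn(bsz, seq_len, dim)                    # variable-length image sequence

hidden_states = torch.cat([context, latents], dim=-2)       # (2, 15, 16): keys/values see context + latents
scores = latents @ hidden_states.transpose(1, 2) * dim ** -0.5  # (2, 4, 15)
attn_output = F.softmax(scores, dim=-1) @ hidden_states     # (2, 4, 16): one output per latent
print(scores.shape, attn_output.shape)
```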
|
class Idefics2PerceiverAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int]=None) -> None:
'''Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`'''
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
Args:
latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to.
context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample.
attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask.
position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token.
past_key_values (`Cache`, *optional*): Tuple of tensors containing cached key and value states.
output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights.
use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_values for caching.
'''
pass
| 4
| 2
| 49
| 10
| 32
| 7
| 4
| 0.22
| 1
| 5
| 0
| 1
| 2
| 12
| 2
| 12
| 99
| 21
| 64
| 32
| 52
| 14
| 47
| 23
| 44
| 7
| 1
| 2
| 8
|
3,075
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2PerceiverLayer
|
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
from torch import nn
import torch
class Idefics2PerceiverLayer(nn.Module):
def __init__(self, config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.n_latents = config.resampler_n_latents
self.depth = config.resampler_depth
self.rms_norm_eps = config.rms_norm_eps
self.input_latents_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
self.input_context_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
self.self_attn = Idefics2PerceiverAttention(config, layer_idx=layer_idx)
self.post_attention_layernorm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
self.mlp = Idefics2MLP(hidden_size=config.hidden_size, intermediate_size=config.hidden_size * 4, output_size=config.hidden_size, hidden_act=config.hidden_act)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
"""
Args:
latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
"""
residual = latents
latents = self.input_latents_norm(latents)
context = self.input_context_norm(context)
latents, _ = self.self_attn(latents=latents, context=context, attention_mask=attention_mask, **kwargs)
latents = residual + latents
residual = latents
latents = self.post_attention_layernorm(latents)
latents = self.mlp(latents)
latents = residual + latents
return latents
|
class Idefics2PerceiverLayer(nn.Module):
def __init__(self, config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
'''
Args:
latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
'''
pass
| 4
| 1
| 34
| 4
| 23
| 7
| 2
| 0.3
| 1
| 6
| 2
| 0
| 2
| 9
| 2
| 12
| 70
| 9
| 47
| 25
| 34
| 14
| 28
| 15
| 25
| 3
| 1
| 1
| 4
|
3,076
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2PerceiverResampler
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from torch import nn
import torch
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...processing_utils import Unpack
@auto_docstring(custom_intro='\n Idefics2 perceiver resampler model that performs `depth` blocks of cross-attention with a fixed number of learned latents.\n ')
class Idefics2PerceiverResampler(Idefics2PreTrainedModel):
config: Idefics2PerceiverConfig
_supports_sdpa = True
_supports_flash_attention_2 = True
_supports_flex_attn = True
def __init__(self, config) -> None:
super().__init__(config)
self.hidden_size = config.hidden_size
self.hidden_act = config.hidden_act
self.n_latents = config.resampler_n_latents
self.depth = config.resampler_depth
self.rms_norm_eps = config.rms_norm_eps
self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size))
self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)])
self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
@auto_docstring
def forward(self, context: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
"""
context (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
Input to the layer.
"""
latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size()))
latent_attention_mask = torch.ones((attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1)
attention_mask = _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents) if self.config._attn_implementation != 'flash_attention_2' else attention_mask
compressed_context = latents
for perceiver_layer in self.layers:
compressed_context = perceiver_layer(compressed_context, context, attention_mask=attention_mask, position_ids=None, **kwargs)
compressed_context = self.norm(compressed_context)
return compressed_context
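Before the perceiver layers run, the learned latents are broadcast across the batch and the image attention mask is extended with ones for the latent positions, because the latents are always valid keys/values. A small sketch of that input preparation with illustrative sizes:
```python
import torch

n_latents, hidden_size = 4, 16
latents = torch.ones(n_latents, hidden_size)                       # learned nn.Parameter in the real module
context = torch.randn(2, 11, hidden_size)                          # (batch, image_seq_len, hidden)
attention_mask = torch.tensor([[1] * 11, [1] * 7 + [0] * 4])       # second image has 4 padded positions

batched_latents = latents.unsqueeze(0).expand(context.shape[0], *latents.size())
latent_attention_mask = torch.ones((attention_mask.size(0), n_latents), dtype=attention_mask.dtype)
full_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1)
print(batched_latents.shape, full_mask.shape)  # torch.Size([2, 4, 16]) torch.Size([2, 15])
```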
|
@auto_docstring(custom_intro='\n Idefics2 perceiver resampler model that performs `depth` blocks of cross-attention with a fixed number of learned latents.\n ')
class Idefics2PerceiverResampler(Idefics2PreTrainedModel):
def __init__(self, config) -> None:
pass
@auto_docstring
def forward(self, context: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
'''
context (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
Input to the layer.
'''
pass
| 5
| 1
| 26
| 4
| 20
| 2
| 2
| 0.07
| 1
| 5
| 2
| 0
| 2
| 9
| 2
| 3
| 56
| 10
| 43
| 23
| 36
| 3
| 25
| 19
| 22
| 3
| 2
| 1
| 4
|
3,077
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2PreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring
class Idefics2PreTrainedModel(PreTrainedModel):
config: Idefics2Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Idefics2VisionAttention', 'Idefics2MLP', 'Idefics2PerceiverLayer', 'Idefics2DecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
def _init_weights(self, module):
std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range)
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, Idefics2RMSNorm):
module.weight.data.fill_(1.0)
elif isinstance(module, nn.MultiheadAttention):
module._reset_parameters()
elif isinstance(module, Idefics2MultiheadAttentionPoolingHead):
module.probe.data.normal_()
elif isinstance(module, Idefics2PerceiverResampler):
module.latents.data.fill_(1.0)
|
@auto_docstring
class Idefics2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 18
| 2
| 16
| 0
| 7
| 0
| 1
| 0
| 0
| 4
| 1
| 0
| 1
| 1
| 28
| 3
| 25
| 11
| 23
| 0
| 20
| 11
| 18
| 7
| 1
| 2
| 7
|
3,078
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2RMSNorm
|
import torch
from torch import nn
class Idefics2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Idefics2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
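In plain terms, the forward pass divides by the root mean square of the last dimension (there is no mean subtraction, unlike LayerNorm) and then rescales with the learned weight. A short numeric check of that formula, using a unit weight:
```python
import torch

eps = 1e-6
x = torch.randn(2, 5, 8, dtype=torch.float32)
weight = torch.ones(8)  # the learned scale, initialised to ones

variance = x.pow(2).mean(-1, keepdim=True)           # mean square over the last dimension
normed = weight * (x * torch.rsqrt(variance + eps))
print(normed.pow(2).mean(-1))  # ~1 everywhere: each position now has unit root-mean-square
```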
|
class Idefics2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
Idefics2RMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
3,079
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2VisionAttention
|
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from torch import nn
class Idefics2VisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.is_causal = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
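When `config._attn_implementation` is `'eager'`, the module falls back to `eager_attention_forward`, which is defined elsewhere in this file. As a rough, hedged sketch of the math that eager path boils down to (a generic scaled-dot-product formulation over `(batch, heads, seq, head_dim)` tensors, not the helper's exact signature or its mask/dropout handling):
```python
import torch
import torch.nn.functional as F

bsz, heads, seq, head_dim = 2, 4, 9, 8
q = torch.randn(bsz, heads, seq, head_dim)
k = torch.randn(bsz, heads, seq, head_dim)
v = torch.randn(bsz, heads, seq, head_dim)

scores = q @ k.transpose(-1, -2) * head_dim ** -0.5  # (2, 4, 9, 9): scaled similarity of every query/key pair
weights = F.softmax(scores, dim=-1)                  # attention probabilities (the returned attn_weights)
attn_output = weights @ v                            # (2, 4, 9, 8), later reshaped to (bsz, seq, embed_dim)
print(attn_output.shape)
```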
|
class Idefics2VisionAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 36
| 7
| 28
| 2
| 4
| 0.09
| 1
| 4
| 0
| 1
| 2
| 11
| 2
| 12
| 77
| 15
| 57
| 26
| 49
| 5
| 41
| 21
| 38
| 5
| 1
| 2
| 7
|
3,080
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2VisionEmbeddings
|
from torch import nn
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
import torch
class Idefics2VisionEmbeddings(nn.Module):
"""
This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
resolution.
The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://huggingface.co/papers/2307.06304)
which allows treating images in their native aspect ratio and without the need to resize them to the same
fixed size. In particular, we start from the original pre-trained SigLIP model
(which uses fixed-size square images) and adapt it by training on images of variable resolutions.
"""
def __init__(self, config: Idefics2VisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding='valid')
self.num_patches_per_side = self.image_size // self.patch_size
self.num_patches = self.num_patches_per_side ** 2
self.num_positions = self.num_patches
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
batch_size, _, max_im_h, max_im_w = pixel_values.shape
patch_embeds = self.patch_embedding(pixel_values)
embeddings = patch_embeds.flatten(2).transpose(1, 2)
max_nb_patches_h, max_nb_patches_w = (max_im_h // self.patch_size, max_im_w // self.patch_size)
boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side, device=pixel_values.device)
position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0, device=pixel_values.device)
for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
nb_patches_h = p_attn_mask[:, 0].sum()
nb_patches_w = p_attn_mask[0].sum()
h_indices = torch.arange(nb_patches_h, device=position_ids.device, dtype=pixel_values.dtype)
w_indices = torch.arange(nb_patches_w, device=position_ids.device, dtype=pixel_values.dtype)
fractional_coords_h = h_indices / nb_patches_h * (1 - 1e-06)
fractional_coords_w = w_indices / nb_patches_w * (1 - 1e-06)
bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
position_ids[batch_idx][p_attn_mask.view(-1)] = pos_ids
embeddings = embeddings + self.position_embedding(position_ids)
return embeddings
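Variable-resolution support hinges on `torch.bucketize`: each patch's fractional coordinate in `[0, 1)` is snapped onto the pretrained `num_patches_per_side` grid, and the row and column buckets are combined into a flat position id. A toy illustration of that primitive with hand-picked coordinates:
```python
import torch

num_patches_per_side = 4
boundaries = torch.arange(1 / num_patches_per_side, 1.0, 1 / num_patches_per_side)
print(boundaries)  # tensor([0.2500, 0.5000, 0.7500])

fractional_coords = torch.tensor([0.10, 0.30, 0.60, 0.90])            # positions along one axis, in [0, 1)
buckets = torch.bucketize(fractional_coords, boundaries, right=True)
print(buckets)  # tensor([0, 1, 2, 3]) -- each coordinate picks a row/column of the 4x4 grid

# A flat position id is then row_bucket * num_patches_per_side + col_bucket, as in the forward above.
print(buckets[:, None] * num_patches_per_side + buckets[None, :])
```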
|
class Idefics2VisionEmbeddings(nn.Module):
'''
    This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
resolution.
The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://huggingface.co/papers/2307.06304)
which allows treating images in their native aspect ratio and without the need to resize them to the same
fixed size. In particular, we start from the original pre-trained SigLIP model
    (which uses fixed-size square images) and adapt it by training on images of variable resolutions.
'''
def __init__(self, config: Idefics2VisionConfig):
pass
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
pass
| 3
| 1
| 22
| 5
| 18
| 0
| 2
| 0.22
| 1
| 4
| 1
| 0
| 2
| 8
| 2
| 12
| 56
| 12
| 36
| 25
| 33
| 8
| 30
| 25
| 27
| 2
| 1
| 1
| 3
|
3,081
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2VisionMLP
|
from ...activations import ACT2FN
from torch import nn
import torch
class Idefics2VisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
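The MLP above is a plain fc1 -> activation -> fc2 block that maps the hidden size up to the intermediate size and back. A self-contained equivalent, with hypothetical sizes standing in for the config values and GELU assumed as the activation:
import torch
from torch import nn

hidden_size, intermediate_size = 32, 128  # hypothetical stand-ins for config values
mlp = nn.Sequential(
    nn.Linear(hidden_size, intermediate_size),  # fc1
    nn.GELU(),                                  # stand-in for ACT2FN[config.hidden_act]
    nn.Linear(intermediate_size, hidden_size),  # fc2
)
x = torch.randn(2, 10, hidden_size)  # (batch, seq, hidden)
print(mlp(x).shape)  # torch.Size([2, 10, 32]) -- hidden size is preserved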
|
class Idefics2VisionMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 13
| 1
| 12
| 7
| 9
| 0
| 12
| 7
| 9
| 1
| 1
| 0
| 2
|
3,082
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/modeling_idefics2.py
|
transformers.models.idefics2.modeling_idefics2.Idefics2VisionTransformer
|
from ...modeling_outputs import BaseModelOutput, ModelOutput
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import check_model_inputs
from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig
from torch import nn
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from typing import Callable, Optional, Union
import torch
@auto_docstring(custom_intro='\n    Idefics2 vision encoder model that returns raw image embeddings.\n    ')
class Idefics2VisionTransformer(Idefics2PreTrainedModel):
config: Idefics2VisionConfig
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_can_record_outputs = {'hidden_states': Idefics2EncoderLayer, 'attentions': Idefics2VisionAttention}
def __init__(self, config: Idefics2VisionConfig):
super().__init__(config)
embed_dim = config.hidden_size
self.config = config
self.embeddings = Idefics2VisionEmbeddings(config)
self.encoder = Idefics2Encoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings = value
@check_model_inputs
@auto_docstring
def forward(self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
"""
patch_attention_mask (`torch.BoolTensor` of shape `(batch_size, num_patches_height, num_patches_width)`, *optional*):
The attention mask for the patches.
"""
batch_size = pixel_values.size(0)
if patch_attention_mask is None:
patch_size = self.config.patch_size
patch_attention_mask = torch.ones((batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size))
patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
patch_attention_mask = patch_attention_mask.view(batch_size, -1)
if not torch.any(~patch_attention_mask):
patch_attention_mask = None
elif self.config._attn_implementation != 'flash_attention_2':
patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, attention_mask=patch_attention_mask, **kwargs)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
return BaseModelOutput(last_hidden_state=last_hidden_state)
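A sketch of how `forward` above builds the default all-True `patch_attention_mask` when none is passed, and why it is then dropped entirely; the image and patch sizes are illustrative assumptions:
import torch

batch_size, height, width, patch_size = 2, 112, 84, 14  # illustrative, not model defaults
pixel_values = torch.randn(batch_size, 3, height, width)

# One boolean per patch of the input grid; all True means no padding anywhere.
patch_attention_mask = torch.ones(
    (batch_size, height // patch_size, width // patch_size), dtype=torch.bool
)
print(patch_attention_mask.shape)  # torch.Size([2, 8, 6])

# forward() flattens the mask and discards it when nothing is actually masked.
flat_mask = patch_attention_mask.view(batch_size, -1)
print(torch.any(~flat_mask))  # tensor(False) -> mask becomes None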
|
@auto_docstring(custom_intro='\n    Idefics2 vision encoder model that returns raw image embeddings.\n    ')
class Idefics2VisionTransformer(Idefics2PreTrainedModel):
def __init__(self, config: Idefics2VisionConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
'''
patch_attention_mask (`torch.BoolTensor` of shape `(batch_size, num_patches_height, num_patches_width)`, *optional*):
The attention mask for the patches.
'''
pass
| 8
| 1
| 17
| 2
| 15
| 1
| 3
| 0.05
| 1
| 6
| 4
| 0
| 4
| 5
| 4
| 5
| 76
| 12
| 61
| 25
| 49
| 3
| 35
| 18
| 30
| 8
| 2
| 1
| 11
|
3,083
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/processing_idefics2.py
|
transformers.models.idefics2.processing_idefics2.Idefics2ImagesKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from typing import TYPE_CHECKING, Optional, Union
class Idefics2ImagesKwargs(ImagesKwargs, total=False):
image_seq_len: Optional[int]
|
class Idefics2ImagesKwargs(ImagesKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
3,084
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/processing_idefics2.py
|
transformers.models.idefics2.processing_idefics2.Idefics2Processor
|
from typing import TYPE_CHECKING, Optional, Union
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import AddedToken, TextInput
from itertools import accumulate
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, is_valid_image, load_image
class Idefics2Processor(ProcessorMixin):
"""
    Constructs an IDEFICS2 processor which wraps a Llama tokenizer and an IDEFICS2 image processor into a single processor.
    [`Idefics2Processor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See
    the docstring of [`~Idefics2Processor.__call__`] and [`~Idefics2Processor.decode`] for more information.
Args:
image_processor (`Idefics2ImageProcessor`):
An instance of [`Idefics2ImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
image_seq_len (`int`, *optional*, defaults to 64):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
config.perceiver_config.resampler_n_latents value for the model used.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'Idefics2ImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor, tokenizer=None, image_seq_len: int=64, chat_template: Optional[str]=None, **kwargs):
if not hasattr(tokenizer, 'image_token'):
self.fake_image_token = AddedToken('<fake_token_around_image>', normalized=False, special=True).content
self.image_token = AddedToken('<image>', normalized=False, special=True).content
tokens_to_add = {'additional_special_tokens': [self.fake_image_token, self.image_token]}
tokenizer.add_special_tokens(tokens_to_add)
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
else:
self.fake_image_token = tokenizer.image_boundary_token
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
self.end_of_utterance_token = AddedToken('<end_of_utterance>', normalized=False, special=True)
tokenizer.add_special_tokens({'additional_special_tokens': [self.end_of_utterance_token]})
self.image_seq_len = image_seq_len
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def _extract_images_from_prompts(self, prompts):
prompt_images = []
for prompt in prompts:
images = []
for elem in prompt:
if is_valid_image(elem):
images.append(elem)
elif is_url(elem):
images.append(load_image(elem))
prompt_images.append(images)
return prompt_images
def __call__(self, images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]]=None, text: Union[TextInput, 'PreTokenizedInput', list[TextInput], list['PreTokenizedInput']]=None, audio=None, videos=None, **kwargs: Unpack[Idefics2ProcessorKwargs]) -> BatchFeature:
"""
Processes the input prompts and returns a BatchEncoding.
Example:
```python
>>> import requests
>>> from transformers import Idefics2Processor
>>> from transformers.image_utils import load_image
>>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2)
>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]
>>> text = [
... "<image>In this image, we see",
... "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>']
```
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. If it is of type `list[ImageInput]`, it's assumed that this is for a single prompt, i.e. of batch size 1.
text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
                Wherever an image token, `<image>`, is encountered, it is expanded to
                `<fake_token_around_image>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
return_tensors (`Union[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
"""
if text is None and images is None:
raise ValueError('You must provide either `text` or `images`.')
output_kwargs = self._merge_kwargs(Idefics2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
image_seq_len = output_kwargs['images_kwargs'].pop('image_seq_len', None)
image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
n_images_in_text = []
inputs = {}
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise ValueError('Invalid input text. Please provide a string, or a list of strings')
fake_image_token = self.fake_image_token
image_token = self.image_token
image_str = f'{fake_image_token}{image_token * image_seq_len}{fake_image_token}'
if self.image_processor.do_image_splitting:
image_str = image_str * 5
image_seq_len *= 5
prompt_strings = []
for sample in text:
n_images_in_text.append(sample.count(image_token))
sample = sample.replace(image_token, image_str)
sample = sample.replace(f'{fake_image_token}{fake_image_token}', f'{fake_image_token}')
prompt_strings.append(sample)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'])
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image'])
inputs.update(text_inputs)
if images is not None:
if is_image_or_image_url(images):
images = [[images]]
elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):
if text is not None:
if sum(n_images_in_text) != len(images):
raise ValueError(f'The total number of {image_token} tokens in the prompts should be the same as the number of images passed. Found {sum(n_images_in_text)} {image_token} tokens and {len(images)} images.')
cumsum_images_in_text = [0] + list(accumulate(n_images_in_text))
images = [images[cumsum_images_in_text[i]:cumsum_images_in_text[i + 1]] for i in range(len(n_images_in_text))]
else:
images = [images]
elif not isinstance(images, (list, tuple)) and (not isinstance(images[0], (list, tuple))) and (not is_image_or_image_url(images[0][0])):
raise ValueError('Invalid input images. Please provide a single image or a list of images or a list of list of images.')
n_images_in_images = [len(sample) for sample in images]
if text is not None and (not n_images_in_images == n_images_in_text):
raise ValueError(f'The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same.')
images = [[load_image(im) for im in sample] for sample in images]
image_inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
inputs.update(image_inputs)
return BatchFeature(inputs, tensor_type=return_tensors)
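The prompt expansion in `__call__` is plain string surgery before tokenization. A standalone sketch mirroring the replacement above, with `image_seq_len=2` and image splitting disabled for brevity:
fake_image_token = '<fake_token_around_image>'
image_token = '<image>'
image_seq_len = 2  # illustrative; the processor default is 64

image_str = f'{fake_image_token}{image_token * image_seq_len}{fake_image_token}'
sample = '<image><image>In these images, we see'
sample = sample.replace(image_token, image_str)
# Collapse the doubled boundary token between two adjacent images.
sample = sample.replace(f'{fake_image_token}{fake_image_token}', fake_image_token)
print(sample)
# <fake_token_around_image><image><image><fake_token_around_image><image><image><fake_token_around_image>In these images, we see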
|
class Idefics2Processor(ProcessorMixin):
'''
    Constructs an IDEFICS2 processor which wraps a Llama tokenizer and an IDEFICS2 image processor into a single processor.
    [`Idefics2Processor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See
    the docstring of [`~Idefics2Processor.__call__`] and [`~Idefics2Processor.decode`] for more information.
Args:
image_processor (`Idefics2ImageProcessor`):
An instance of [`Idefics2ImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
image_seq_len (`int`, *optional*, defaults to 64):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
config.perceiver_config.resampler_n_latents value for the model used.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
'''
def __init__(self, image_processor, tokenizer=None, image_seq_len: int=64, chat_template: Optional[str]=None, **kwargs):
pass
def _extract_images_from_prompts(self, prompts):
pass
def __call__(self, images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]]=None, text: Union[TextInput, 'PreTokenizedInput', list[TextInput], list['PreTokenizedInput']]=None, audio=None, videos=None, **kwargs: Unpack[Idefics2ProcessorKwargs]) -> BatchFeature:
'''
Processes the input prompts and returns a BatchEncoding.
Example:
```python
>>> import requests
>>> from transformers import Idefics2Processor
>>> from transformers.image_utils import load_image
>>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2)
>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]
>>> text = [
... "<image>In this image, we see",
... "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>']
```
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. If it is of type `list[ImageInput]`, it's assumed that this is for a single prompt, i.e. of batch size 1.
text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
                Wherever an image token, `<image>`, is encountered, it is expanded to
                `<fake_token_around_image>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
return_tensors (`Union[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
'''
pass
| 4
| 2
| 31
| 4
| 18
| 9
| 5
| 0.59
| 1
| 9
| 2
| 0
| 6
| 4
| 6
| 23
| 214
| 33
| 114
| 43
| 99
| 67
| 82
| 35
| 75
| 15
| 2
| 4
| 27
|
3,085
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics2/processing_idefics2.py
|
transformers.models.idefics2.processing_idefics2.Idefics2ProcessorKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
class Idefics2ProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: Idefics2ImagesKwargs
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'is_split_into_words': False}, 'images_kwargs': {}}
|
class Idefics2ProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 1
| 10
| 2
| 9
| 0
| 3
| 2
| 2
| 0
| 3
| 0
| 0
|
3,086
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/configuration_idefics3.py
|
transformers.models.idefics3.configuration_idefics3.Idefics3Config
|
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class Idefics3Config(PretrainedConfig):
"""
    This is the configuration class to store the configuration of an [`Idefics3Model`]. It is used to instantiate an
    Idefics3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Idefics3
[HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should cache the key/value pairs of the attention mechanism. Only
relevant if `config.is_decoder=True`.
image_token_id (`int`, *optional*, defaults to 128257):
The id of the "image" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the word embeddings with the token embeddings.
vision_config (`IdeficsVisionConfig` or `dict`, *optional*, defaults to `IdeficsVisionConfig`):
Custom vision config or dict for the vision tower
text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
Custom text config or dict for the text model
scale_factor (`int`, *optional*, defaults to 2):
The scale factor for the image encoder.
pad_token_id (`int`, *optional*, defaults to 128002):
The id of the padding token.
Example:
```python
>>> from transformers import Idefics3Model, Idefics3Config
>>> # Initializing configuration
>>> configuration = Idefics3Config()
>>> # Initializing a model from the configuration
>>> model = Idefics3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'idefics3'
sub_configs = {'text_config': AutoConfig, 'vision_config': Idefics3VisionConfig}
def __init__(self, use_cache=True, image_token_id=128257, tie_word_embeddings=False, vision_config=None, text_config=None, scale_factor=2, pad_token_id=128002, **kwargs):
self.image_token_id = image_token_id
self.use_cache = use_cache
self.tie_word_embeddings = tie_word_embeddings
if vision_config is None:
self.vision_config = Idefics3VisionConfig()
logger.info('vision_config is None, using default vision config')
elif isinstance(vision_config, dict):
self.vision_config = Idefics3VisionConfig(**vision_config)
elif isinstance(vision_config, Idefics3VisionConfig):
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config['model_type'] = text_config.get('model_type', 'llama')
text_config = CONFIG_MAPPING[text_config['model_type']](**text_config)
elif text_config is None:
logger.info('text_config is None, using default text config')
text_config = CONFIG_MAPPING['llama'](rms_norm_eps=1e-05, pad_token_id=pad_token_id, tie_word_embeddings=False)
self.text_config = text_config
self.scale_factor = scale_factor
super().__init__(**kwargs, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings)
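As a usage sketch of the `__init__` above: sub-configs may be passed as plain dicts and are promoted to config objects. The tiny sizes are illustrative, and this assumes a transformers release that exports `Idefics3Config`:
from transformers import Idefics3Config

config = Idefics3Config(
    vision_config={'hidden_size': 64, 'num_hidden_layers': 2},  # promoted via Idefics3VisionConfig(**dict)
    text_config={'model_type': 'llama', 'hidden_size': 128, 'num_hidden_layers': 2},
    scale_factor=2,
)
print(type(config.vision_config).__name__)  # Idefics3VisionConfig
print(config.text_config.model_type)  # llama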
|
class Idefics3Config(PretrainedConfig):
'''
    This is the configuration class to store the configuration of an [`Idefics3Model`]. It is used to instantiate an
    Idefics3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Idefics3
[HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should cache the key/value pairs of the attention mechanism. Only
relevant if `config.is_decoder=True`.
image_token_id (`int`, *optional*, defaults to 128257):
The id of the "image" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the word embeddings with the token embeddings.
vision_config (`IdeficsVisionConfig` or `dict`, *optional*, defaults to `IdeficsVisionConfig`):
Custom vision config or dict for the vision tower
text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
Custom text config or dict for the text model
scale_factor (`int`, *optional*, defaults to 2):
The scale factor for the image encoder.
pad_token_id (`int`, *optional*, defaults to 128002):
The id of the padding token.
Example:
```python
>>> from transformers import Idefics3Model, Idefics3Config
>>> # Initializing configuration
>>> configuration = Idefics3Config()
>>> # Initializing a model from the configuration
>>> model = Idefics3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, use_cache=True, image_token_id=128257, tie_word_embeddings=False, vision_config=None, text_config=None, scale_factor=2, pad_token_id=128002, **kwargs):
pass
| 2
| 1
| 38
| 4
| 34
| 0
| 7
| 0.89
| 1
| 3
| 1
| 0
| 1
| 6
| 1
| 1
| 79
| 9
| 37
| 20
| 25
| 33
| 20
| 10
| 18
| 7
| 1
| 1
| 7
|
3,087
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/configuration_idefics3.py
|
transformers.models.idefics3.configuration_idefics3.Idefics3VisionConfig
|
from ...configuration_utils import PretrainedConfig
class Idefics3VisionConfig(PretrainedConfig):
"""
    This is the configuration class to store the configuration of an [`Idefics3VisionModel`]. It is used to instantiate an
    Idefics3 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics3 model
[HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1152):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers.models.idefics3.modeling_idefics3 import Idefics3VisionTransformer
>>> from transformers.models.idefics3.configuration_idefics3 import Idefics3VisionConfig
>>> # Initializing a Idefics3VisionConfig with google/siglip-base-patch16-224 style configuration
>>> configuration = Idefics3VisionConfig()
>>> # Initializing a Idefics3VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
>>> model = Idefics3VisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'idefics3_vision'
base_config_key = 'vision_config'
def __init__(self, hidden_size=1152, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=16, num_channels=3, image_size=224, patch_size=32, hidden_act='gelu_pytorch_tanh', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
|
class Idefics3VisionConfig(PretrainedConfig):
'''
    This is the configuration class to store the configuration of an [`Idefics3VisionModel`]. It is used to instantiate an
    Idefics3 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics3 model
[HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1152):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers.models.idefics3.modeling_idefics3 import Idefics3VisionTransformer
>>> from transformers.models.idefics3.configuration_idefics3 import Idefics3VisionConfig
>>> # Initializing a Idefics3VisionConfig with google/siglip-base-patch16-224 style configuration
>>> configuration = Idefics3VisionConfig()
>>> # Initializing a Idefics3VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
>>> model = Idefics3VisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=1152, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=16, num_channels=3, image_size=224, patch_size=32, hidden_act='gelu_pytorch_tanh', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=0.02, **kwargs):
pass
| 2
| 1
| 28
| 1
| 27
| 0
| 1
| 1.43
| 1
| 1
| 0
| 0
| 1
| 11
| 1
| 1
| 83
| 10
| 30
| 29
| 14
| 43
| 16
| 15
| 14
| 1
| 1
| 0
| 1
|
3,088
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/image_processing_idefics3.py
|
transformers.models.idefics3.image_processing_idefics3.Idefics3ImageProcessor
|
import numpy as np
from typing import Any, Optional, Union
import math
from ...image_transforms import PaddingMode, pad, to_channel_dimension_format, to_pil_image
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_nested_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from collections.abc import Iterable
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...utils import TensorType, is_vision_available, logging
class Idefics3ImageProcessor(BaseImageProcessor):
"""
    Constructs an Idefics3 image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
shortest edge resized to keep the input aspect ratio.
size (`Dict`, *optional*, defaults to `{"longest_edge": 4 * 364}`):
Controls the size of the output image. This is a dictionary containing the key "longest_edge".
The image will be resized such that the longest edge is <= `size["longest_edge"]` and the shortest edge is resized
to keep the input aspect ratio.
resample (`Resampling`, *optional*, defaults to `Resampling.LANCZOS`):
Resampling filter to use when resizing the image.
do_image_splitting (`bool`, *optional*, defaults to `True`):
Whether to split the image into sub-images concatenated with the original image. They are split into patches
such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`.
max_image_size (`Dict`, *optional*, defaults to `{"longest_edge": 364}`):
Maximum resolution of the patches of images accepted by the model. This is a dictionary containing the key "longest_edge".
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
a standard deviation of `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch and number of images per
sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
"""
model_input_names = ['pixel_values', 'pixel_attention_mask']
def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.LANCZOS, do_image_splitting: bool=True, max_image_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
self.do_convert_rgb = do_convert_rgb
self.do_resize = do_resize
self.size = size if size is not None else {'longest_edge': 4 * 364}
self.resample = resample
self.do_image_splitting = do_image_splitting
self.max_image_size = max_image_size if max_image_size is not None else {'longest_edge': 364}
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image. The longest edge of the image is resized to size["longest_edge"], with the shortest edge
resized to keep the input aspect ratio. Can also be used with size["height"] and size["width"].
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))
data_format = input_data_format if data_format is None else data_format
if 'longest_edge' in size:
size = get_resize_output_image_size(image, resolution_max_side=size['longest_edge'], input_data_format=input_data_format)
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.")
image_mode = None
if image.ndim == 2 or image.shape[-1] == 1:
image_mode = 'P'
image = to_pil_image(image, image_mode=image_mode, input_data_format=input_data_format)
resized_image = image.resize((size[1], size[0]), resample=resample)
resized_image = np.array(resized_image)
resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image
resized_image = to_channel_dimension_format(resized_image, data_format, input_channel_dim=ChannelDimension.LAST)
return resized_image
def split_image(self, image, max_image_size: dict[str, int], resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Split an image into squares of side max_image_size and the original image resized to max_image_size.
That means that a single image becomes a sequence of images.
This is a "trick" to spend more compute on each image with no changes in the vision encoder.
1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio.
2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)`
sub-images of the same size each (image_size, image_size). Typically, 364x364.
3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width.
Args:
image (`np.ndarray`):
Images to split.
max_image_size (`dict[str, int]`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
max_height = max_width = max_image_size['longest_edge']
frames = []
if height > max_height or width > max_width:
num_splits_h = math.ceil(height / max_height)
num_splits_w = math.ceil(width / max_width)
optimal_height = math.ceil(height / num_splits_h)
optimal_width = math.ceil(width / num_splits_w)
for r in range(num_splits_h):
for c in range(num_splits_w):
start_x = c * optimal_width
start_y = r * optimal_height
end_x = min(start_x + optimal_width, width)
end_y = min(start_y + optimal_height, height)
cropped_image = _crop(image, start_x, start_y, end_x, end_y, data_format=data_format)
frames.append(cropped_image)
global_image_height, global_image_width = (max_height, max_width)
if height != global_image_height or width != global_image_width:
image = self.resize(image, {'height': global_image_height, 'width': global_image_width}, resample=resample, input_data_format=data_format)
else:
num_splits_h, num_splits_w = (0, 0)
frames.append(image)
return (frames, num_splits_h, num_splits_w)
def resize_for_vision_encoder(self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Images to resize.
vision_encoder_max_size (`int`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred
"""
height, width = get_image_size(image, channel_dim=input_data_format)
aspect_ratio = width / height
if width >= height:
width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
height = int(width / aspect_ratio)
height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
elif height > width:
height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
width = int(height * aspect_ratio)
width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
new_size = {'height': height, 'width': width}
return self.resize(image, size=new_size, resample=resample, input_data_format=input_data_format, data_format=data_format)
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def pad(self, images: list[list[np.ndarray]], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
        For a list of lists of images, pads each image at the bottom and right with zeros up to the largest height and width in the batch.
        For each sample in the batch, pads the sample with empty images up to the max number of images per sample in the batch. Optionally returns a pixel mask.
Args:
images (`list[list[np.ndarray]]`):
List of list of images to pad. Pads to the largest height and width in the batch.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
pad_size = get_max_height_width(images, input_data_format=input_data_format)
batch_size = len(images)
max_num_images = max((len(images_) for images_ in images))
input_data_format = infer_channel_dimension_format(images[0][0], num_channels=(1, 3, 4)) if input_data_format is None else input_data_format
data_format = input_data_format if data_format is None else data_format
first_image_in_list = [sample_images for sample_images in images if sample_images][0][0]
if input_data_format == ChannelDimension.FIRST:
n_channels = first_image_in_list.shape[0]
elif input_data_format == ChannelDimension.LAST:
n_channels = first_image_in_list.shape[-1]
else:
raise ValueError('Invalid channel dimension format.')
def empty_image(size, input_data_format):
if input_data_format == ChannelDimension.FIRST:
return np.zeros((n_channels, *size), dtype=np.uint8)
elif input_data_format == ChannelDimension.LAST:
return np.zeros((*size, n_channels), dtype=np.uint8)
padded_images_list = [[empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size)]
padded_masks = [[np.zeros(pad_size, dtype=np.int64) for _ in range(max_num_images)] for _ in range(batch_size)]
for batch_idx in range(batch_size):
for sample_idx, image in enumerate(images[batch_idx]):
padded_images_list[batch_idx][sample_idx] = self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
padded_masks[batch_idx][sample_idx] = make_pixel_mask(image, output_size=pad_size, input_data_format=input_data_format)
padded_masks = padded_masks if return_pixel_mask else None
return (padded_images_list, padded_masks)
def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_image_splitting: Optional[bool]=None, do_rescale: Optional[bool]=None, max_image_size: Optional[dict[str, int]]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_row_col_info: bool=False, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. With the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
Whether to split the image into sub-images concatenated with the original image. They are split into patches
such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`.
max_image_size (`Dict`, *optional*, defaults to `self.max_image_size`):
Maximum resolution of the images. If the image is larger than this size, the image is split into patches.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether or not to pad the images to the largest height and width in the batch.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            return_row_col_info (`bool`, *optional*, defaults to `False`):
Whether to return the number of rows and columns of the split images. This is used for the
`Idefics3Processor` to generate prompt strings based on the number of rows and columns.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting
max_image_size = max_image_size if max_image_size is not None else self.max_image_size
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
images = self.fetch_images(images)
images_list = make_nested_list_of_images(images)
if not valid_images(images_list[0]):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
palettes_list = [[im.getpalette() if isinstance(im, Image.Image) and im.mode == 'P' else None for im in images] for images in images_list]
images_list = [[to_numpy_array(image) for image in images] for images in images_list]
first_image_in_list = [images for images in images_list if images][0][0]
if input_data_format in [ChannelDimension.LAST, None]:
images_list = [[np.expand_dims(img, axis=-1) if img.ndim == 2 else img for img in images] for images in images_list]
elif input_data_format == ChannelDimension.FIRST:
images_list = [[np.expand_dims(img, axis=0) if img.ndim == 2 else img for img in images] for images in images_list]
if do_rescale and is_scaled_image(first_image_in_list):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(first_image_in_list, num_channels=(1, 3, 4))
if do_resize:
images_list = [[self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] for images in images_list]
if do_image_splitting:
images_list = [[self.resize_for_vision_encoder(image, max_image_size['longest_edge'], resample=resample, input_data_format=input_data_format) for image in images] for images in images_list]
images_list_split_arrays = []
palettes_list_split_arrays = []
images_list_rows = []
images_list_cols = []
for images, palettes in zip(images_list, palettes_list):
split_image_arrays = []
split_palettes_arrays = []
image_rows = []
image_cols = []
for image, palette in zip(images, palettes):
split_image_array, rows, cols = self.split_image(image, max_image_size=max_image_size, resample=resample, input_data_format=input_data_format)
split_image_arrays.extend(split_image_array)
split_palettes_arrays.extend([palette] * len(split_image_array))
image_rows.append(rows)
image_cols.append(cols)
images_list_split_arrays.append(split_image_arrays)
palettes_list_split_arrays.append(split_palettes_arrays)
images_list_rows.append(image_rows)
images_list_cols.append(image_cols)
images_list = images_list_split_arrays
palettes_list = palettes_list_split_arrays
else:
images_list = [[self.resize(image=image, size={'height': max_image_size['longest_edge'], 'width': max_image_size['longest_edge']}, resample=resample, input_data_format=input_data_format) for image in images] for images in images_list]
images_list_rows = [[0] * len(images) for images in images_list]
images_list_cols = [[0] * len(images) for images in images_list]
if do_convert_rgb:
images_list = [[convert_to_rgb(img, palette) for img, palette in zip(images, palettes)] for images, palettes in zip(images_list, palettes_list)]
if do_rescale:
images_list = [[self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] for images in images_list]
if do_normalize:
images_list = [[self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] for images in images_list]
pixel_attention_mask = None
if do_pad:
images_list, pixel_attention_mask = self.pad(images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format)
if data_format is not None:
images_list = [[to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] for images in images_list]
data = {'pixel_values': np.array(images_list) if do_pad and return_tensors is not None else images_list}
if pixel_attention_mask is not None:
data['pixel_attention_mask'] = np.array(pixel_attention_mask) if do_pad and return_tensors is not None else pixel_attention_mask
encoding = BatchFeature(data=data, tensor_type=return_tensors)
if return_row_col_info:
encoding['rows'] = images_list_rows
encoding['cols'] = images_list_cols
return encoding
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
            images_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
do_image_splitting = images_kwargs.get('do_image_splitting', self.do_image_splitting)
max_image_size = images_kwargs.get('max_image_size', self.max_image_size)
size = images_kwargs.get('size', self.size)
num_patches = num_rows = num_cols = 1
if do_image_splitting:
height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size['longest_edge'])
height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=4096)
aspect_ratio = width / height
            if width >= height:
                resized_width = math.ceil(width / max_image_size['longest_edge']) * max_image_size['longest_edge']
                resized_height = int(resized_width / aspect_ratio)
                resized_height = math.ceil(resized_height / max_image_size['longest_edge']) * max_image_size['longest_edge']
            elif height > width:
                resized_height = math.ceil(height / max_image_size['longest_edge']) * max_image_size['longest_edge']
                resized_width = int(resized_height * aspect_ratio)
                resized_width = math.ceil(resized_width / max_image_size['longest_edge']) * max_image_size['longest_edge']
max_height = max_width = max_image_size['longest_edge']
if resized_height > max_height or resized_width > max_width:
num_rows = math.ceil(resized_height / max_height)
num_cols = math.ceil(resized_width / max_width)
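                # The +1 accounts for the downscaled global image appended after the crops.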
num_patches = num_rows * num_cols + 1
return (num_patches, num_rows, num_cols)
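A quick numeric check of the patch arithmetic above (a minimal sketch using the documented defaults of this processor, `size={'longest_edge': 4 * 364}` and `max_image_size={'longest_edge': 364}`; the resized dimensions are taken as already rounded to multiples of 364, as `resize_for_vision_encoder` produces):

```python
import math

max_edge = 364                                # max_image_size['longest_edge']
resized_height, resized_width = 728, 1456     # e.g. a 2:1 landscape image after resize_for_vision_encoder

num_rows = math.ceil(resized_height / max_edge)   # 2
num_cols = math.ceil(resized_width / max_edge)    # 4
num_patches = num_rows * num_cols + 1             # 8 crops plus 1 downscaled global image = 9
print(num_patches, num_rows, num_cols)            # 9 2 4
```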
|
class Idefics3ImageProcessor(BaseImageProcessor):
'''
    Constructs an Idefics3 image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB. This is useful if the input image is of a different format, e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
shortest edge resized to keep the input aspect ratio.
size (`Dict`, *optional*, defaults to `{"longest_edge": 4 * 364}`):
Controls the size of the output image. This is a dictionary containing the key "longest_edge".
The image will be resized such that the longest edge is <= `size["longest_edge"]` and the shortest edge is resized
to keep the input aspect ratio.
resample (`Resampling`, *optional*, defaults to `Resampling.LANCZOS`):
Resampling filter to use when resizing the image.
do_image_splitting (`bool`, *optional*, defaults to `True`):
            Whether to split the image into sub-images concatenated with the original image. They are split into patches
            such that each patch has a size of `max_image_size["longest_edge"]` x `max_image_size["longest_edge"]`.
max_image_size (`Dict`, *optional*, defaults to `{"longest_edge": 364}`):
Maximum resolution of the patches of images accepted by the model. This is a dictionary containing the key "longest_edge".
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
a standard deviation of `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch and number of images per
sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
'''
def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.LANCZOS, do_image_splitting: bool=True, max_image_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image. The longest edge of the image is resized to size["longest_edge"], with the shortest edge
resized to keep the input aspect ratio. Can also be used with size["height"] and size["width"].
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def split_image(self, image, max_image_size: dict[str, int], resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Split an image into squares of side max_image_size and the original image resized to max_image_size.
That means that a single image becomes a sequence of images.
This is a "trick" to spend more compute on each image with no changes in the vision encoder.
1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio.
2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)`
        sub-images of the same size each (max_image_size, max_image_size). Typically, 364x364.
3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width.
Args:
image (`np.ndarray`):
Images to split.
max_image_size (`dict[str, int]`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def resize_for_vision_encoder(self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Images to resize.
vision_encoder_max_size (`int`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image with zeros to the given size.
'''
pass
def pad(self, images: list[list[np.ndarray]], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
        For a list of lists of images, pads each image to the bottom and right with zeros up to the largest height and width in the batch.
        For each sample in the batch, pads the sample with empty images up to the maximum number of images per sample in the batch. Optionally returns a pixel mask.
Args:
images (`list[list[np.ndarray]]`):
List of list of images to pad. Pads to the largest height and width in the batch.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def empty_image(size, input_data_format):
pass
def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_image_splitting: Optional[bool]=None, do_rescale: Optional[bool]=None, max_image_size: Optional[dict[str, int]]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_row_col_info: bool=False, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Preprocess a batch of images.
Args:
images (`ImageInput`):
A list of images to preprocess.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. With the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
                Whether to split the image into sub-images concatenated with the original image. They are split into patches
                such that each patch has a size of `max_image_size["longest_edge"]` x `max_image_size["longest_edge"]`.
max_image_size (`Dict`, *optional*, defaults to `self.max_image_size`):
Maximum resolution of the images. If the image is larger than this size, the image is split into patches.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether or not to pad the images to the largest height and width in the batch.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            return_row_col_info (`bool`, *optional*, defaults to `False`):
Whether to return the number of rows and columns of the split images. This is used for the
`Idefics3Processor` to generate prompt strings based on the number of rows and columns.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
'''
        A utility that returns the number of image patches for a given image size.
        Args:
            height (`int`):
                Height of the input image.
            width (`int`):
                Width of the input image.
            images_kwargs (`dict`, *optional*):
                Any kwargs to override defaults of the image processor.
        Returns:
            `tuple[int, int, int]`: The number of patches per image, plus the number of rows and columns of the split.
'''
pass
| 10
| 8
| 70
| 5
| 46
| 19
| 8
| 0.53
| 1
| 12
| 3
| 0
| 7
| 12
| 7
| 27
| 601
| 47
| 363
| 144
| 283
| 191
| 171
| 72
| 162
| 34
| 3
| 3
| 66
|
3,089
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3BaseModelOutputWithPast
|
from typing import Callable, Optional, Union
import torch
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from dataclasses import dataclass
from ...modeling_outputs import BaseModelOutput, ModelOutput
@dataclass
@auto_docstring(custom_intro="\n Base class for Idefics3 model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class Idefics3BaseModelOutputWithPast(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
        sequence_length, hidden_size)`.
        Image hidden states of the model produced by the vision encoder.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Base class for Idefics3 model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class Idefics3BaseModelOutputWithPast(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
        sequence_length, hidden_size)`.
        Image hidden states of the model produced by the vision encoder.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4.83
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 1
| 6
| 6
| 5
| 29
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
3,090
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3CausalLMOutputWithPast
|
from ...modeling_outputs import BaseModelOutput, ModelOutput
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro='\n Base class for Idefics causal language model (or autoregressive) outputs.\n ')
class Idefics3CausalLMOutputWithPast(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
        sequence_length, hidden_size)`.
        Image hidden states of the model produced by the vision encoder.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for Idefics causal language model (or autoregressive) outputs.\n ')
class Idefics3CausalLMOutputWithPast(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
        sequence_length, hidden_size)`.
        Image hidden states of the model produced by the vision encoder.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.71
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 35
| 2
| 7
| 7
| 6
| 26
| 7
| 7
| 6
| 0
| 1
| 0
| 0
|
3,091
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3Connector
|
from torch import nn
class Idefics3Connector(nn.Module):
def __init__(self, config):
super().__init__()
self.scale_factor = config.scale_factor
self.modality_projection = Idefics3SimpleMLP(config)
def pixel_shuffle(self, x, scale_factor=2):
bsz, seq, embed_dim = x.size()
height = width = int(seq ** 0.5)
x = x.view(bsz, height, width, embed_dim)
x = x.view(bsz, height, int(width / scale_factor), embed_dim * scale_factor)
x = x.permute(0, 2, 1, 3)
x = x.reshape(bsz, int(width / scale_factor), int(height / scale_factor), embed_dim * scale_factor ** 2)
x = x.permute(0, 2, 1, 3)
x = x.reshape(bsz, int(seq / scale_factor ** 2), embed_dim * scale_factor ** 2)
return x
def forward(self, image_hidden_states):
image_hidden_states = self.pixel_shuffle(image_hidden_states, self.scale_factor)
image_hidden_states = self.modality_projection(image_hidden_states)
return image_hidden_states
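As a shape-level illustration of `pixel_shuffle` (a standalone sketch; the 26x26 grid of 1152-dim patch tokens mirrors typical Idefics3 vision-tower outputs, but any square sequence length works):

```python
import torch

x = torch.randn(1, 676, 1152)   # (bsz, seq, embed_dim); 676 = 26 * 26 patch tokens
bsz, seq, embed_dim = x.shape
scale = 2
h = w = int(seq ** 0.5)         # 26

x = x.view(bsz, h, w, embed_dim)
x = x.view(bsz, h, w // scale, embed_dim * scale)                   # fold column pairs into channels
x = x.permute(0, 2, 1, 3)
x = x.reshape(bsz, w // scale, h // scale, embed_dim * scale ** 2)  # fold row pairs as well
x = x.permute(0, 2, 1, 3)
x = x.reshape(bsz, seq // scale ** 2, embed_dim * scale ** 2)
print(x.shape)  # torch.Size([1, 169, 4608]): 4x fewer tokens, 4x wider features
```

This is why `Idefics3Model` computes `image_seq_len` as `(image_size // patch_size) ** 2 / scale_factor ** 2`: with a 364-pixel tile and 14-pixel patches, 676 patch tokens shrink to 169 visual tokens.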
|
class Idefics3Connector(nn.Module):
def __init__(self, config):
pass
def pixel_shuffle(self, x, scale_factor=2):
pass
def forward(self, image_hidden_states):
pass
| 4
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 3
| 2
| 3
| 13
| 21
| 2
| 19
| 8
| 15
| 0
| 19
| 8
| 15
| 1
| 1
| 0
| 3
|
3,092
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3Encoder
|
from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from ...modeling_outputs import BaseModelOutput, ModelOutput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from torch import nn
import torch
from typing import Callable, Optional, Union
class Idefics3Encoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Idefics3EncoderLayer`].
Args:
config: Idefics3Config
"""
def __init__(self, config: Idefics3Config):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Idefics3EncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutput]:
hidden_states = inputs_embeds
for encoder_layer in self.layers:
layer_outputs = encoder_layer(hidden_states, attention_mask)
hidden_states = layer_outputs
return BaseModelOutput(last_hidden_state=hidden_states)
|
class Idefics3Encoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Idefics3EncoderLayer`].
Args:
config: Idefics3Config
'''
def __init__(self, config: Idefics3Config):
pass
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutput]:
pass
| 4
| 1
| 38
| 4
| 24
| 10
| 7
| 0.56
| 1
| 8
| 3
| 0
| 2
| 3
| 2
| 12
| 86
| 11
| 48
| 18
| 38
| 27
| 27
| 11
| 24
| 12
| 1
| 2
| 13
|
3,093
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3EncoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
import torch
from torch import nn
from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
class Idefics3EncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Idefics3VisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = Idefics3VisionAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = Idefics3VisionMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class Idefics3EncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Idefics3VisionConfig):
pass
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
| 4
| 0
| 22
| 3
| 15
| 5
| 2
| 0.37
| 1
| 5
| 2
| 0
| 2
| 5
| 2
| 12
| 47
| 6
| 30
| 16
| 22
| 11
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
3,094
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3ForConditionalGeneration
|
from ...cache_utils import Cache, DynamicCache
from torch import nn
from ...generation import GenerationMixin
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
import torch
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The Idefics3 Model with a language modeling head. It is made up of a SigLIP vision encoder, with a language modeling head on top.\n ')
class Idefics3ForConditionalGeneration(Idefics3PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.model = Idefics3Model(config)
self.image_token_id = self.config.image_token_id
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.vocab_size = config.text_config.vocab_size
self.post_init()
def enable_input_require_grads(self):
"""
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
the model weights fixed.
"""
def make_inputs_require_grads(module, input, output):
output.requires_grad_(True)
self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
def disable_input_require_grads(self):
self._text_require_grads_hook.remove()
self._vision_require_grads_hook.remove()
def get_input_embeddings(self):
return self.model.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.text_model.set_input_embeddings(value)
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
return self.model.get_image_features(pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Idefics3CausalLMOutputWithPast]:
"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics3ForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModelForVision2Seq
>>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
>>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=True, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return Idefics3CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, pixel_values=None, pixel_attention_mask=None, image_hidden_states=None, logits_to_keep=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, logits_to_keep=logits_to_keep, **kwargs)
if image_hidden_states is not None or cache_position[0] != 0:
model_inputs['pixel_values'] = None
model_inputs['pixel_attention_mask'] = None
return model_inputs
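The `logits_to_keep` slicing in `forward` lets the LM head run on only a suffix of the sequence (generation typically needs just the last position). A minimal sketch of the indexing semantics, independent of the model:

```python
import torch

hidden_states = torch.randn(2, 10, 16)   # (batch, seq, hidden)
logits_to_keep = 1                        # 0 would keep every position
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
print(hidden_states[:, slice_indices, :].shape)   # torch.Size([2, 1, 16])
```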
|
@auto_docstring(custom_intro='\n The Idefics3 Model with a language modeling head. It is made up of a SigLIP vision encoder, with a language modeling head on top.\n ')
class Idefics3ForConditionalGeneration(Idefics3PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def enable_input_require_grads(self):
'''
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
the model weights fixed.
'''
pass
def make_inputs_require_grads(module, input, output):
pass
def disable_input_require_grads(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Idefics3CausalLMOutputWithPast]:
'''
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics3ForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModelForVision2Seq
>>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
>>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, pixel_values=None, pixel_attention_mask=None, image_hidden_states=None, logits_to_keep=None, **kwargs):
pass
| 13
| 2
| 22
| 2
| 13
| 7
| 2
| 0.55
| 2
| 5
| 2
| 0
| 10
| 6
| 10
| 11
| 261
| 34
| 147
| 58
| 106
| 81
| 75
| 30
| 63
| 9
| 2
| 2
| 26
|
3,095
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3Model
|
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from ...cache_utils import Cache, DynamicCache
import torch
from ..auto import AutoModel
from ...processing_utils import Unpack
@auto_docstring(custom_intro='\n Idefics3 model consisting of a SIGLIP vision encoder and Llama3 language decoder\n ')
class Idefics3Model(Idefics3PreTrainedModel):
def __init__(self, config: Idefics3Config):
super().__init__(config)
self.padding_idx = self.config.text_config.pad_token_id
self.vocab_size = self.config.text_config.vocab_size
self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config)
self.connector = Idefics3Connector(config)
self.text_model = AutoModel.from_config(config.text_config)
self.image_seq_len = int((config.vision_config.image_size // config.vision_config.patch_size) ** 2 / config.scale_factor ** 2)
self.image_token_id = self.config.image_token_id
self.post_init()
def enable_input_require_grads(self):
"""
Enables the gradients for the input embeddings.
        This is useful for LoRA when using gradient checkpointing.
c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
"""
def get_lowest_module(module):
if len(list(module.children())) == 0:
return module
else:
return get_lowest_module(list(module.children())[0])
def make_inputs_require_grads(module, input, output):
output.requires_grad_(True)
self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(make_inputs_require_grads)
def disable_input_require_grads(self):
self._text_require_grads_hook.remove()
self._vision_require_grads_hook.remove()
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
def inputs_merger(self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor]):
"""
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
The merging happens as follows:
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
- We get the image hidden states for the image through the vision encoder and that hidden state, after a pixel shuffle operation, is then projected into the text embedding space.
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
        - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
- To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device))
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
image_hidden_states = image_hidden_states.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_hidden_states)
return inputs_embeds
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image.
"""
batch_size, num_images, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.to(dtype=self.dtype)
pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
nb_values_per_image = pixel_values.shape[1:].numel()
real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
pixel_values = pixel_values[real_images_inds].contiguous()
if pixel_attention_mask is None:
pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device)
else:
pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
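        # Collapse the pixel mask to one boolean per vision patch using two non-overlapping unfolds.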
patch_size = self.config.vision_config.patch_size
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
image_hidden_states = self.connector(image_hidden_states.last_hidden_state)
return image_hidden_states
@can_return_tuple
@auto_docstring(custom_intro="\n Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to\n the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where\n max_num_images is the maximum number of images among the batch_size samples in the batch.\n Padding images are not needed beyond padding the pixel_values at the entrance of the model.\n For efficiency, we only pass through the vision_model's forward the real images by\n discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where\n image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.\n ")
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, Idefics3BaseModelOutputWithPast]:
"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.training and self.text_model.gradient_checkpointing and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(self.device)
if pixel_values is not None and image_hidden_states is not None:
raise ValueError('You cannot specify both pixel_values and image_hidden_states at the same time')
elif pixel_values is not None:
image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
elif image_hidden_states is not None:
image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
if image_hidden_states is not None:
inputs_embeds = self.inputs_merger(input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states)
outputs = self.text_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=True, **kwargs)
return Idefics3BaseModelOutputWithPast(last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_hidden_states)
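To make the `masked_scatter` step of `inputs_merger` concrete, here is a toy sketch with made-up sizes (`99` stands in for the image token id):

```python
import torch

image_token_id = 99                          # hypothetical id
input_ids = torch.tensor([[1, 99, 99, 2]])   # text, two image slots, text
inputs_embeds = torch.zeros(1, 4, 8)         # (batch, seq, hidden)
image_hidden_states = torch.ones(2, 8)       # one projected vector per image slot

mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
merged = inputs_embeds.masked_scatter(mask, image_hidden_states)
print(merged[0, :, 0])   # tensor([0., 1., 1., 0.]): image vectors fill the slots in order
```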
|
@auto_docstring(custom_intro='\n Idefics3 model consisting of a SIGLIP vision encoder and Llama3 language decoder\n ')
class Idefics3Model(Idefics3PreTrainedModel):
def __init__(self, config: Idefics3Config):
pass
def enable_input_require_grads(self):
'''
Enables the gradients for the input embeddings.
        This is useful for LoRA when using gradient checkpointing.
c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
'''
pass
def get_lowest_module(module):
pass
def make_inputs_require_grads(module, input, output):
pass
def disable_input_require_grads(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def inputs_merger(self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor]):
'''
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
The merging happens as follows:
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
- We get the image hidden states for the image through the vision encoder and that hidden state, after a pixel shuffle operation, is then projected into the text embedding space.
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
        - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
- To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
'''
pass
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor]=None):
'''
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image.
'''
pass
@can_return_tuple
@auto_docstring(custom_intro="\n Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to\n the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where\n max_num_images is the maximum number of images among the batch_size samples in the batch.\n Padding images are not needed beyond padding the pixel_values at the entrance of the model.\n For efficiency, we only pass through the vision_model's forward the real images by\n discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where\n image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.\n ")
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, Idefics3BaseModelOutputWithPast]:
'''
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection.
'''
pass
| 14
| 4
| 23
| 3
| 17
| 3
| 3
| 0.21
| 1
| 13
| 6
| 0
| 7
| 11
| 7
| 8
| 218
| 31
| 155
| 66
| 114
| 33
| 82
| 34
| 72
| 18
| 2
| 2
| 27
|
3,096
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3PreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from torch import nn
@auto_docstring
class Idefics3PreTrainedModel(PreTrainedModel):
config: Idefics3Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Idefics3VisionAttention', 'Idefics3DecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
def _init_weights(self, module):
std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range)
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, Idefics3RMSNorm):
module.weight.data.fill_(1.0)
|
@auto_docstring
class Idefics3PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 18
| 2
| 16
| 0
| 7
| 0.04
| 1
| 0
| 0
| 3
| 1
| 0
| 1
| 1
| 29
| 3
| 25
| 11
| 23
| 1
| 20
| 11
| 18
| 7
| 1
| 2
| 7
|
3,097
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3RMSNorm
|
from torch import nn
import torch
class Idefics3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Idefics3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
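In formula form the layer computes `weight * x / sqrt(mean(x ** 2) + eps)`: unlike LayerNorm there is no mean subtraction and no bias. A quick numeric check of the forward pass above (assuming the class is importable as defined):

```python
import torch

norm = Idefics3RMSNorm(hidden_size=4, eps=1e-6)
x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
print(torch.allclose(norm(x), manual))   # True, since weight initializes to ones
```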
|
class Idefics3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
Idefics3RMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
3,098
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3SimpleMLP
|
from torch import nn
class Idefics3SimpleMLP(nn.Module):
def __init__(self, config):
super().__init__()
input_size = config.vision_config.hidden_size * config.scale_factor ** 2
output_size = config.text_config.hidden_size
self.proj = nn.Linear(input_size, output_size, bias=False)
def forward(self, x):
return self.proj(x)
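The input width of this projection follows directly from the pixel shuffle in `Idefics3Connector`. A sketch of the arithmetic, assuming Idefics3-like config values (SigLIP hidden size 1152, `scale_factor` 2, text hidden size 4096):

```python
vision_hidden, scale_factor, text_hidden = 1152, 2, 4096   # assumed config values
input_size = vision_hidden * scale_factor ** 2             # 4608, the shuffled feature width
print(input_size, '->', text_hidden)
```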
|
class Idefics3SimpleMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 9
| 1
| 8
| 6
| 5
| 0
| 8
| 6
| 5
| 1
| 1
| 0
| 2
|
3,099
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
|
transformers.models.idefics3.modeling_idefics3.Idefics3VisionAttention
|
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from typing import Callable, Optional, Union
class Idefics3VisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.is_causal = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
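Shape bookkeeping for the projections above, as a standalone sketch with toy sizes (the backend-specific `attention_interface` is replaced by a plain scaled dot product):

```python
import torch

batch, seq, embed_dim, num_heads = 2, 5, 8, 2
head_dim = embed_dim // num_heads                               # 4; must divide evenly
q = torch.randn(batch, seq, embed_dim)
q = q.view(batch, seq, num_heads, head_dim).transpose(1, 2)     # (batch, heads, seq, head_dim)
scores = (q @ q.transpose(-1, -2)) * head_dim ** -0.5           # (batch, heads, seq, seq)
print(scores.shape)   # torch.Size([2, 2, 5, 5])
```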
|
class Idefics3VisionAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 36
| 7
| 28
| 2
| 4
| 0.09
| 1
| 4
| 0
| 1
| 2
| 11
| 2
| 12
| 77
| 15
| 57
| 26
| 49
| 5
| 41
| 21
| 38
| 5
| 1
| 2
| 7
|