Dataset schema: column name, dtype, and the observed value range (string columns show min–max string lengths; ⌀ marks a nullable column). The numeric columns are Understand-style static code metrics computed per class.

| Column | Dtype | Range / string lengths |
|---|---|---|
| id | int64 | 0–328k |
| repository_name | string | lengths 7–58 |
| file_path | string | lengths 9–302 |
| class_name | string | lengths 5–256 |
| human_written_code | string | lengths 16–2.16M |
| class_skeleton | string | lengths 18–1.49M ⌀ |
| total_program_units | int64 | 1–1.76k |
| total_doc_str | int64 | 0–771 |
| AvgCountLine | float64 | 0–7.89k |
| AvgCountLineBlank | float64 | 0–297 |
| AvgCountLineCode | float64 | 0–7.89k |
| AvgCountLineComment | float64 | 0–7.89k |
| AvgCyclomatic | float64 | 0–130 |
| CommentToCodeRatio | float64 | 0–168 |
| CountClassBase | float64 | 0–40 |
| CountClassCoupled | float64 | 0–583 |
| CountClassCoupledModified | float64 | 0–575 |
| CountClassDerived | float64 | 0–5.35k |
| CountDeclInstanceMethod | float64 | 0–529 |
| CountDeclInstanceVariable | float64 | 0–296 |
| CountDeclMethod | float64 | 0–599 |
| CountDeclMethodAll | float64 | 0–1.12k |
| CountLine | float64 | 1–40.4k |
| CountLineBlank | float64 | 0–8.16k |
| CountLineCode | float64 | 1–25.7k |
| CountLineCodeDecl | float64 | 1–8.15k |
| CountLineCodeExe | float64 | 0–24.2k |
| CountLineComment | float64 | 0–16.5k |
| CountStmt | float64 | 1–9.71k |
| CountStmtDecl | float64 | 1–8.15k |
| CountStmtExe | float64 | 0–9.69k |
| MaxCyclomatic | float64 | 0–759 |
| MaxInheritanceTree | float64 | 0–16 |
| MaxNesting | float64 | 0–34 |
| SumCyclomatic | float64 | 0–2.9k |
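To make the schema concrete, here is a minimal sketch of loading and inspecting rows like the ones below with the `datasets` library. The dataset id `org/class-level-code-metrics` is a placeholder, not the real Hub id:

```python
from datasets import load_dataset

# Placeholder id -- substitute the dataset's actual Hugging Face Hub id.
ds = load_dataset("org/class-level-code-metrics", split="train")

row = ds[500]
print(row["repository_name"])          # e.g. "huggingface/pytorch-pretrained-BERT"
print(row["class_name"])               # fully qualified class name
print(row["human_written_code"][:80])  # start of the extracted class source

# The numeric columns are Understand-style static metrics for the class.
print({k: row[k] for k in ("CountLine", "CountLineCode", "CommentToCodeRatio")})
```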
---

**Row 500**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py`
- class_name: `transformers.models.albert.modeling_albert.AlbertLayerGroup`

**human_written_code:**

```python
import torch
from ...processing_utils import Unpack
from .configuration_albert import AlbertConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from torch import nn
class AlbertLayerGroup(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()
        self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
        for layer_index, albert_layer in enumerate(self.albert_layers):
            hidden_states = albert_layer(hidden_states, attention_mask, head_mask[layer_index], **kwargs)
        return hidden_states
```

**class_skeleton:**

```python
class AlbertLayerGroup(nn.Module):
    def __init__(self, config: AlbertConfig):
        pass
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
        pass
```

**Metrics:** `total_program_units=3, total_doc_str=0, AvgCountLine=16, AvgCountLineBlank=3, AvgCountLineCode=13, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.04, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=33, CountLineBlank=6, CountLineCode=27, CountLineCodeDecl=16, CountLineCodeExe=17, CountLineComment=1, CountStmt=20, CountStmtDecl=9, CountStmtExe=17, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=7`
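A quick consistency check on the metric columns: for this row, `CommentToCodeRatio` (0.04) matches `CountLineComment / CountLineCode` within rounding, which is how a comment-to-code ratio is conventionally derived; treat the exact formula as an assumption:

```python
count_line_comment = 1   # CountLineComment for row 500
count_line_code = 27     # CountLineCode for row 500

print(round(count_line_comment / count_line_code, 2))  # 0.04
```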
---

**Row 501**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py`
- class_name: `transformers.models.albert.modeling_albert.AlbertMLMHead`

**human_written_code:**

```python
from ...activations import ACT2FN
from torch import nn
from .configuration_albert import AlbertConfig
import torch
class AlbertMLMHead(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
        self.activation = ACT2FN[config.hidden_act]
        self.decoder.bias = self.bias
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.decoder(hidden_states)
        prediction_scores = hidden_states
        return prediction_scores
    def _tie_weights(self) -> None:
        if self.decoder.bias.device.type == 'meta':
            self.decoder.bias = self.bias
        else:
            self.bias = self.decoder.bias
```

**class_skeleton:**

```python
class AlbertMLMHead(nn.Module):
    def __init__(self, config: AlbertConfig):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
    def _tie_weights(self) -> None:
        pass
```

**Metrics:** `total_program_units=4, total_doc_str=0, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=7, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=5, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=28, CountLineBlank=5, CountLineCode=21, CountLineCodeDecl=10, CountLineCodeExe=17, CountLineComment=2, CountStmt=20, CountStmtDecl=10, CountStmtExe=16, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4`
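The `_tie_weights` method above keeps the head's standalone `bias` and `decoder.bias` referring to one tensor. A minimal self-contained sketch of that tying pattern (illustrative sizes, not taken from any config):

```python
import torch
from torch import nn

decoder = nn.Linear(128, 30000)          # embedding_size -> vocab_size
bias = nn.Parameter(torch.zeros(30000))
decoder.bias = bias                      # tie: both names now share one Parameter

assert decoder.bias is bias
bias.data[0] = 1.0
print(decoder.bias[0].item())            # 1.0, visible through either name
```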
---

**Row 502**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py`
- class_name: `transformers.models.albert.modeling_albert.AlbertModel`

**human_written_code:**

```python
from .configuration_albert import AlbertConfig
from torch import nn
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
@auto_docstring
class AlbertModel(AlbertPreTrainedModel):
    config_class = AlbertConfig
    base_model_prefix = 'albert'
    def __init__(self, config: AlbertConfig, add_pooling_layer: bool=True):
        """
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config
        self.embeddings = AlbertEmbeddings(config)
        self.encoder = AlbertTransformer(config)
        if add_pooling_layer:
            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
            self.pooler_activation = nn.Tanh()
        else:
            self.pooler = None
            self.pooler_activation = None
        self.attn_implementation = config._attn_implementation
        self.position_embedding_type = config.position_embedding_type
        self.post_init()
    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value: nn.Embedding) -> None:
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has
        a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT
        model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.
        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
        while [2,3] correspond to the two inner groups of the second hidden layer.
        Any layer with in index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more
        information about head pruning
        """
        for layer, heads in heads_to_prune.items():
            group_idx = int(layer / self.config.inner_group_num)
            inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
            self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
    @check_model_inputs
    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[BaseModelOutputWithPooling, tuple]:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        attention_mask = self._update_full_mask(attention_mask, embedding_output)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        encoder_outputs = self.encoder(embedding_output, attention_mask, head_mask=head_mask, position_ids=position_ids, **kwargs)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
    def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
        if attention_mask is not None:
            if 'flash' in self.config._attn_implementation:
                attention_mask = attention_mask if 0 in attention_mask else None
            elif self.config._attn_implementation == 'sdpa':
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
            elif self.config._attn_implementation == 'flex_attention':
                if isinstance(attention_mask, torch.Tensor):
                    attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
            else:
                attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
        return attention_mask
```

**class_skeleton:**

```python
@auto_docstring
class AlbertModel(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig, add_pooling_layer: bool=True):
        '''
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        '''
        pass
    def get_input_embeddings(self) -> nn.Embedding:
        pass
    def set_input_embeddings(self, value: nn.Embedding) -> None:
        pass
    def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
        '''
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has
        a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT
        model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.
        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
        while [2,3] correspond to the two inner groups of the second hidden layer.
        Any layer with in index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more
        information about head pruning
        '''
        pass
    @check_model_inputs
    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[BaseModelOutputWithPooling, tuple]:
        pass
    def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
        pass
```

**Metrics:** `total_program_units=10, total_doc_str=2, AvgCountLine=25, AvgCountLineBlank=3, AvgCountLineCode=19, AvgCountLineComment=2, AvgCyclomatic=4, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=7, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=137, CountLineBlank=22, CountLineCode=105, CountLineCodeDecl=41, CountLineCodeExe=82, CountLineComment=11, CountStmt=59, CountStmtDecl=29, CountStmtExe=53, MaxCyclomatic=14, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=20`
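The `_prune_heads` docstring above maps flat layer indices to (group, inner group) pairs. A worked sketch of that arithmetic for the documented case of `inner_group_num=2` and flattened indices 0 through 3:

```python
inner_group_num = 2  # two inner groups per hidden group, as in the docstring

for layer in range(4):
    group_idx = int(layer / inner_group_num)
    inner_group_idx = int(layer - group_idx * inner_group_num)
    print(layer, "->", (group_idx, inner_group_idx))
# 0 -> (0, 0), 1 -> (0, 1), 2 -> (1, 0), 3 -> (1, 1)
```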
---

**Row 503**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py`
- class_name: `transformers.models.albert.modeling_albert.AlbertPreTrainedModel`

**human_written_code:**

```python
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_albert import AlbertConfig
@auto_docstring
class AlbertPreTrainedModel(PreTrainedModel):
    config_class = AlbertConfig
    base_model_prefix = 'albert'
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {'hidden_states': AlbertLayer, 'attentions': AlbertAttention}
    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, AlbertMLMHead):
            module.bias.data.zero_()
```

**class_skeleton:**

```python
@auto_docstring
class AlbertPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights.'''
        pass
```

**Metrics:** `total_program_units=3, total_doc_str=1, AvgCountLine=15, AvgCountLineBlank=0, AvgCountLineCode=12, AvgCountLineComment=3, AvgCyclomatic=6, CommentToCodeRatio=0.41, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=7, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=26, CountLineBlank=2, CountLineCode=17, CountLineCodeDecl=6, CountLineCodeExe=15, CountLineComment=7, CountStmt=15, CountStmtDecl=6, CountStmtExe=13, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6`
---

**Row 504**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py`
- class_name: `transformers.models.albert.modeling_albert.AlbertSOPHead`

**human_written_code:**

```python
from torch import nn
from .configuration_albert import AlbertConfig
import torch
class AlbertSOPHead(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
        dropout_pooled_output = self.dropout(pooled_output)
        logits = self.classifier(dropout_pooled_output)
        return logits
```

**class_skeleton:**

```python
class AlbertSOPHead(nn.Module):
    def __init__(self, config: AlbertConfig):
        pass
    def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
        pass
```

**Metrics:** `total_program_units=3, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=11, CountLineBlank=2, CountLineCode=9, CountLineCodeDecl=7, CountLineCodeExe=6, CountLineComment=0, CountStmt=9, CountStmtDecl=7, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2`
---

**Row 505**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py`
- class_name: `transformers.models.albert.modeling_albert.AlbertTransformer`

**human_written_code:**

```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from torch import nn
from ...processing_utils import Unpack
from .configuration_albert import AlbertConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
class AlbertTransformer(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()
        self.config = config
        self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
        self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[BaseModelOutput, tuple]:
        hidden_states = self.embedding_hidden_mapping_in(hidden_states)
        head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
        for i in range(self.config.num_hidden_layers):
            layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
            group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
            hidden_states = self.albert_layer_groups[group_idx](hidden_states, attention_mask, head_mask[group_idx * layers_per_group:(group_idx + 1) * layers_per_group], **kwargs)
        return BaseModelOutput(last_hidden_state=hidden_states)
```

**class_skeleton:**

```python
class AlbertTransformer(nn.Module):
    def __init__(self, config: AlbertConfig):
        pass
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[BaseModelOutput, tuple]:
        pass
```

**Metrics:** `total_program_units=3, total_doc_str=0, AvgCountLine=25, AvgCountLineBlank=5, AvgCountLineCode=19, AvgCountLineComment=1, AvgCyclomatic=5, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=51, CountLineBlank=10, CountLineCode=39, CountLineCodeDecl=20, CountLineCodeExe=28, CountLineComment=2, CountStmt=23, CountStmtDecl=12, CountStmtExe=20, MaxCyclomatic=8, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=9`
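The `forward` above is where ALBERT's cross-layer parameter sharing happens: `num_hidden_layers` iterations reuse only `num_hidden_groups` parameter sets. A sketch of the index mapping with illustrative values (ALBERT-base itself uses a single group, so every layer would reuse group 0):

```python
num_hidden_layers = 12
num_hidden_groups = 4  # illustrative values, not taken from this row
layers_per_group = int(num_hidden_layers / num_hidden_groups)

for i in range(num_hidden_layers):
    group_idx = int(i / (num_hidden_layers / num_hidden_groups))
    head_slice = (group_idx * layers_per_group, (group_idx + 1) * layers_per_group)
    print(f"layer {i}: group {group_idx}, head_mask slice {head_slice}")
# layers 0-2 use group 0, 3-5 group 1, 6-8 group 2, 9-11 group 3
```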
---

**Row 506**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/tokenization_albert.py`
- class_name: `transformers.models.albert.tokenization_albert.AlbertTokenizer`

**human_written_code:**

```python
from ...utils.import_utils import requires
import unicodedata
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from shutil import copyfile
from typing import Any, Optional
import os
@requires(backends=('sentencepiece',))
class AlbertTokenizer(PreTrainedTokenizer):
    """
    Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.
    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether or not to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
            <Tip>
            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.
            </Tip>
        eos_token (`str`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token.
            <Tip>
            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.
            </Tip>
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.
            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> list[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An ALBERT sequence has the following format:
        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
```

**class_skeleton:**

```python
@requires(backends=('sentencepiece',))
class AlbertTokenizer(PreTrainedTokenizer):
    '''
    Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.
    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether or not to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
            <Tip>
            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.
            </Tip>
        eos_token (`str`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token.
            <Tip>
            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.
            </Tip>
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.
            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    '''
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
        pass
    @property
    def vocab_size(self) -> int:
        pass
    def get_vocab(self) -> dict[str, int]:
        pass
    def __getstate__(self):
        pass
    def __setstate__(self, d):
        pass
    def preprocess_text(self, inputs):
        pass
    def _tokenize(self, text: str) -> list[str]:
        '''Tokenize a string.'''
        pass
    def _convert_token_to_id(self, token):
        '''Converts a token (str) in an id using the vocab.'''
        pass
    def _convert_id_to_token(self, index):
        '''Converts an index (integer) in a token (str) using the vocab.'''
        pass
    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) in a single string.'''
        pass
    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An ALBERT sequence has the following format:
        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass
    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        '''
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        '''
        pass
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
```

**Metrics:** `total_program_units=16, total_doc_str=7, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.73, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=14, CountDeclInstanceVariable=7, CountDeclMethod=14, CountDeclMethodAll=103, CountLine=311, CountLineBlank=52, CountLineCode=150, CountLineCodeDecl=63, CountLineCodeExe=113, CountLineComment=109, CountStmt=102, CountStmtDecl=40, CountStmtExe=87, MaxCyclomatic=5, MaxInheritanceTree=3, MaxNesting=4, SumCyclomatic=35`
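The special-token layout in `build_inputs_with_special_tokens` and the matching mask from `get_special_tokens_mask` can be reproduced with plain lists; the ids 2/3 used for `[CLS]`/`[SEP]` here are stand-ins, not ALBERT's real vocabulary ids:

```python
cls_id, sep_id = 2, 3          # stand-in ids for [CLS] and [SEP]
token_ids_0 = [7, 8, 9]        # first sequence
token_ids_1 = [10, 11]         # optional second sequence

pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
mask = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
print(pair)  # [2, 7, 8, 9, 3, 10, 11, 3]
print(mask)  # [1, 0, 0, 0, 1, 0, 0, 1]
```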
---

**Row 507**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/tokenization_albert_fast.py`
- class_name: `transformers.models.albert.tokenization_albert_fast.AlbertTokenizerFast`

**human_written_code:**

```python
from ...tokenization_utils import AddedToken
from typing import Optional
from shutil import copyfile
from ...tokenization_utils_fast import PreTrainedTokenizerFast
import os
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
    tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods
    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether or not to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
            <Tip>
            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.
            </Tip>
        eos_token (`str`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token
            that is used for the end of sequence. The token used is the `sep_token`.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An ALBERT sequence has the following format:
        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
```

**class_skeleton:**

```python
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    '''
    Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
    tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods
    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether or not to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
            <Tip>
            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.
            </Tip>
        eos_token (`str`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token
            that is used for the end of sequence. The token used is the `sep_token`.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    '''
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        pass
    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An ALBERT sequence has the following format:
        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
```

**Metrics:** `total_program_units=4, total_doc_str=2, AvgCountLine=23, AvgCountLineBlank=3, AvgCountLineCode=15, AvgCountLineComment=6, AvgCyclomatic=3, CommentToCodeRatio=0.92, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=4, CountDeclMethod=5, CountDeclMethodAll=93, CountLine=172, CountLineBlank=24, CountLineCode=77, CountLineCodeDecl=37, CountLineCodeExe=51, CountLineComment=71, CountStmt=34, CountStmtDecl=17, CountStmtExe=28, MaxCyclomatic=5, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=13`
---

**Row 508**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/align/configuration_align.py`
- class_name: `transformers.models.align.configuration_align.AlignConfig`

**human_written_code:**

````python
from ...configuration_utils import PretrainedConfig
class AlignConfig(PretrainedConfig):
    """
    [`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. It is used to
    instantiate a ALIGN model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ALIGN
    [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`AlignTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`AlignVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 640):
            Dimensionality of text and vision projection layers.
        temperature_init_value (`float`, *optional*, defaults to 1.0):
            The initial value of the *temperature* parameter. Default is used as per the original ALIGN implementation.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        kwargs (*optional*):
            Dictionary of keyword arguments.
    Example:
    ```python
    >>> from transformers import AlignConfig, AlignModel
    >>> # Initializing a AlignConfig with kakaobrain/align-base style configuration
    >>> configuration = AlignConfig()
    >>> # Initializing a AlignModel (with random weights) from the kakaobrain/align-base style configuration
    >>> model = AlignModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> # We can also initialize a AlignConfig from a AlignTextConfig and a AlignVisionConfig
    >>> from transformers import AlignTextConfig, AlignVisionConfig
    >>> # Initializing ALIGN Text and Vision configurations
    >>> config_text = AlignTextConfig()
    >>> config_vision = AlignVisionConfig()
    >>> config = AlignConfig.from_text_vision_configs(config_text, config_vision)
    ```"""
    model_type = 'align'
    sub_configs = {'text_config': AlignTextConfig, 'vision_config': AlignVisionConfig}
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the AlignTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.')
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
````

**class_skeleton:**

````python
class AlignConfig(PretrainedConfig):
    '''
    [`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. It is used to
    instantiate a ALIGN model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ALIGN
    [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`AlignTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`AlignVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 640):
            Dimensionality of text and vision projection layers.
        temperature_init_value (`float`, *optional*, defaults to 1.0):
            The initial value of the *temperature* parameter. Default is used as per the original ALIGN implementation.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        kwargs (*optional*):
            Dictionary of keyword arguments.
    Example:
    ```python
    >>> from transformers import AlignConfig, AlignModel
    >>> # Initializing a AlignConfig with kakaobrain/align-base style configuration
    >>> configuration = AlignConfig()
    >>> # Initializing a AlignModel (with random weights) from the kakaobrain/align-base style configuration
    >>> model = AlignModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> # We can also initialize a AlignConfig from a AlignTextConfig and a AlignVisionConfig
    >>> from transformers import AlignTextConfig, AlignVisionConfig
    >>> # Initializing ALIGN Text and Vision configurations
    >>> config_text = AlignTextConfig()
    >>> config_vision = AlignVisionConfig()
    >>> config = AlignConfig.from_text_vision_configs(config_text, config_vision)
    ```'''
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        pass
````

**Metrics:** `total_program_units=2, total_doc_str=1, AvgCountLine=18, AvgCountLineBlank=3, AvgCountLineCode=12, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=1.56, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=88, CountLineBlank=19, CountLineCode=27, CountLineCodeDecl=19, CountLineCodeExe=15, CountLineComment=42, CountStmt=18, CountStmtDecl=10, CountStmtExe=15, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4`
---

**Row 509**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/align/configuration_align.py`
- class_name: `transformers.models.align.configuration_align.AlignTextConfig`

**human_written_code:**

````python
from ...configuration_utils import PretrainedConfig
class AlignTextConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a
    ALIGN text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text encoder of the ALIGN
    [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values here are
    copied from BERT.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Align Text model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`AlignTextModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`AlignTextModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
    Example:
    ```python
    >>> from transformers import AlignTextConfig, AlignTextModel
    >>> # Initializing a AlignTextConfig with kakaobrain/align-base style configuration
    >>> configuration = AlignTextConfig()
    >>> # Initializing a AlignTextModel (with random weights) from the kakaobrain/align-base style configuration
    >>> model = AlignTextModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'align_text_model'
    base_config_key = 'text_config'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
````

**class_skeleton:**

````python
class AlignTextConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a
    ALIGN text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text encoder of the ALIGN
    [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values here are
    copied from BERT.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Align Text model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`AlignTextModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`AlignTextModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
    Example:
    ```python
    >>> from transformers import AlignTextConfig, AlignTextModel
    >>> # Initializing a AlignTextConfig with kakaobrain/align-base style configuration
    >>> configuration = AlignTextConfig()
    >>> # Initializing a AlignTextModel (with random weights) from the kakaobrain/align-base style configuration
    >>> model = AlignTextModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs):
        pass
````

**Metrics:** `total_program_units=2, total_doc_str=1, AvgCountLine=36, AvgCountLineBlank=1, AvgCountLineCode=35, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.5, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=15, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=105, CountLineBlank=10, CountLineCode=38, CountLineCodeDecl=37, CountLineCodeExe=18, CountLineComment=57, CountStmt=20, CountStmtDecl=19, CountStmtExe=18, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1`
510
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/configuration_align.py
|
transformers.models.align.configuration_align.AlignVisionConfig
|
from ...configuration_utils import PretrainedConfig
class AlignVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`AlignVisionModel`]. It is used to instantiate a
ALIGN vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the ALIGN
[kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values are copied
from EfficientNet (efficientnet-b7)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 600):
The input image size.
width_coefficient (`float`, *optional*, defaults to 2.0):
Scaling coefficient for network width at each stage.
depth_coefficient (`float`, *optional*, defaults to 3.1):
Scaling coefficient for network depth at each stage.
depth_divisor (`int`, *optional*, defaults to 8):
A unit of network width.
kernel_sizes (`list[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
List of kernel sizes to be used in each block.
in_channels (`list[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
List of input channel sizes to be used in each block for convolutional layers.
out_channels (`list[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
List of output channel sizes to be used in each block for convolutional layers.
depthwise_padding (`list[int]`, *optional*, defaults to `[]`):
List of block indices with square padding.
strides (`list[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
List of stride sizes to be used in each block for convolutional layers.
num_block_repeats (`list[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
List of the number of times each block is to be repeated.
expand_ratios (`list[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
List of scaling coefficients for each block.
squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
Squeeze expansion ratio.
hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported.
hidden_dim (`int`, *optional*, defaults to 2560):
The hidden dimension of the layer before the classification head.
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
`"max"`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
batch_norm_eps (`float`, *optional*, defaults to 1e-3):
The epsilon used by the batch normalization layers.
batch_norm_momentum (`float`, *optional*, defaults to 0.99):
The momentum used by the batch normalization layers.
drop_connect_rate (`float`, *optional*, defaults to 0.2):
The drop rate for skip connections.
Example:
```python
>>> from transformers import AlignVisionConfig, AlignVisionModel
>>> # Initializing a AlignVisionConfig with kakaobrain/align-base style configuration
>>> configuration = AlignVisionConfig()
>>> # Initializing a AlignVisionModel (with random weights) from the kakaobrain/align-base style configuration
>>> model = AlignVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'align_vision_model'
base_config_key = 'vision_config'
def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: list[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: list[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: list[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: list[int]=[], strides: list[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: list[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: list[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, drop_connect_rate: float=0.2, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = sum(num_block_repeats) * 4
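A quick check of the derived attribute in the constructor above, using only the documented defaults (no checkpoint is involved): `num_hidden_layers` is computed from the block repeat counts rather than passed in.
```python
from transformers import AlignVisionConfig

config = AlignVisionConfig()
print(sum(config.num_block_repeats))  # 1 + 2 + 2 + 3 + 3 + 4 + 1 == 16
print(config.num_hidden_layers)       # 16 * 4 == 64, per the constructor above
```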
|
class AlignVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`AlignVisionModel`]. It is used to instantiate a
ALIGN vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the ALIGN
[kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values are copied
from EfficientNet (efficientnet-b7).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 600):
The input image size.
width_coefficient (`float`, *optional*, defaults to 2.0):
Scaling coefficient for network width at each stage.
depth_coefficient (`float`, *optional*, defaults to 3.1):
Scaling coefficient for network depth at each stage.
depth_divisor (`int`, *optional*, defaults to 8):
A unit of network width.
kernel_sizes (`list[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
List of kernel sizes to be used in each block.
in_channels (`list[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
List of input channel sizes to be used in each block for convolutional layers.
out_channels (`list[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
List of output channel sizes to be used in each block for convolutional layers.
depthwise_padding (`list[int]`, *optional*, defaults to `[]`):
List of block indices with square padding.
strides (`list[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
List of stride sizes to be used in each block for convolutional layers.
num_block_repeats (`list[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
List of the number of times each block is to be repeated.
expand_ratios (`list[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
List of scaling coefficients for each block.
squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
Squeeze expansion ratio.
hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported.
hidden_dim (`int`, *optional*, defaults to 2560):
The hidden dimension of the layer before the classification head.
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
`"max"`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
batch_norm_eps (`float`, *optional*, defaults to 1e-3):
The epsilon used by the batch normalization layers.
batch_norm_momentum (`float`, *optional*, defaults to 0.99):
The momentum used by the batch normalization layers.
drop_connect_rate (`float`, *optional*, defaults to 0.2):
The drop rate for skip connections.
Example:
```python
>>> from transformers import AlignVisionConfig, AlignVisionModel
>>> # Initializing a AlignVisionConfig with kakaobrain/align-base style configuration
>>> configuration = AlignVisionConfig()
>>> # Initializing a AlignVisionModel (with random weights) from the kakaobrain/align-base style configuration
>>> model = AlignVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: list[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: list[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: list[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: list[int]=[], strides: list[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: list[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: list[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, drop_connect_rate: float=0.2, **kwargs):
pass
| 2
| 1
| 47
| 1
| 46
| 0
| 1
| 1.24
| 1
| 4
| 0
| 0
| 1
| 21
| 1
| 1
| 120
| 10
| 49
| 48
| 24
| 61
| 26
| 25
| 24
| 1
| 1
| 0
| 1
|
511
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignModel
|
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging
from torch import nn
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
from typing import Any, Callable, Optional, Union
@auto_docstring
class AlignModel(AlignPreTrainedModel):
config: AlignConfig
def __init__(self, config: AlignConfig):
super().__init__(config)
if not isinstance(config.text_config, AlignTextConfig):
raise TypeError(f'config.text_config is expected to be of type AlignTextConfig but is of type {type(config.text_config)}.')
if not isinstance(config.vision_config, AlignVisionConfig):
raise TypeError(f'config.vision_config is expected to be of type AlignVisionConfig but is of type {type(config.vision_config)}.')
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.text_model = AlignTextModel(text_config)
self.vision_model = AlignVisionModel(vision_config)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim)
self.temperature = nn.Parameter(torch.tensor(self.config.temperature_init_value))
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.FloatTensor:
"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AlignTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoTokenizer, AlignModel
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds)
last_hidden_state = text_outputs[0][:, 0, :]
text_features = self.text_projection(last_hidden_state)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`AlignVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AlignModel
>>> from transformers.image_utils import load_image
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs = self.vision_model(pixel_values=pixel_values)
image_features = vision_outputs.pooler_output
return image_features
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, AlignOutput]:
"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AlignModel
>>> from transformers.image_utils import load_image
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(
... images=image, text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True
... )
>>> with torch.inference_mode():
... outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=True)
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
image_embeds = vision_outputs[1]
text_embeds = text_outputs[0][:, 0, :]
text_embeds = self.text_projection(text_embeds)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
loss = align_loss(logits_per_text)
return AlignOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
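For illustration, a minimal sketch of the similarity math in `forward` above, run on random tensors; the batch sizes and embedding dimension are made up and no checkpoint is loaded.
```python
import torch

text_embeds = torch.randn(2, 640)    # (text_batch, projection_dim) -- illustrative
image_embeds = torch.randn(3, 640)   # (image_batch, projection_dim)
# L2-normalize, then temperature-scale the dot products, as in forward()
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
temperature = torch.tensor(1.0)      # a learned nn.Parameter in the real model
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / temperature
print(logits_per_text.shape)         # torch.Size([2, 3]); transpose gives per-image
```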
|
@auto_docstring
class AlignModel(AlignPreTrainedModel):
def __init__(self, config: AlignConfig):
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.FloatTensor:
'''
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AlignTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoTokenizer, AlignModel
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```'''
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
'''
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`AlignVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AlignModel
>>> from transformers.image_utils import load_image
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```'''
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, AlignOutput]:
'''
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AlignModel
>>> from transformers.image_utils import load_image
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(
... images=image, text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True
... )
>>> with torch.inference_mode():
... outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```'''
pass
| 12
| 3
| 54
| 9
| 32
| 13
| 4
| 0.4
| 1
| 11
| 6
| 0
| 4
| 6
| 4
| 5
| 225
| 41
| 132
| 59
| 94
| 53
| 51
| 27
| 46
| 7
| 2
| 1
| 17
|
512
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignOutput
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndNoAttention
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging
@dataclass
@auto_docstring
class AlignOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The output of [`AlignVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`AlignTextModel`].
vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
The output of the [`AlignVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
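A hedged usage sketch of `to_tuple` (reusing `model` and `inputs` from the `AlignModel` example earlier in this file): the nested text and vision outputs are flattened recursively rather than returned as `ModelOutput` objects.
```python
outputs = model(**inputs)       # an AlignOutput instance
as_tuple = outputs.to_tuple()
# the last two entries correspond to text_model_output and vision_model_output,
# each already flattened by its own to_tuple()
print(type(as_tuple[-1]))       # <class 'tuple'>
```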
|
@dataclass
@auto_docstring
class AlignOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The output of [`AlignVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`AlignTextModel`].
vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
The output of the [`AlignVisionModel`].
'''
def to_tuple(self) -> tuple[Any]:
pass
| 4
| 1
| 5
| 0
| 5
| 0
| 2
| 1.46
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 1
| 34
| 2
| 13
| 9
| 11
| 19
| 10
| 9
| 8
| 2
| 1
| 0
| 2
|
513
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
from torch import nn
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging
@auto_docstring
class AlignPreTrainedModel(PreTrainedModel):
config: AlignConfig
base_model_prefix = 'align'
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, AlignModel):
nn.init.xavier_uniform_(module.text_projection.weight)
module.text_projection.bias.data.zero_()
module.temperature.data.fill_(self.config.temperature_init_value)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class AlignPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
| 3
| 1
| 17
| 0
| 16
| 1
| 7
| 0.25
| 1
| 1
| 1
| 3
| 1
| 0
| 1
| 1
| 27
| 2
| 20
| 5
| 18
| 5
| 18
| 5
| 16
| 7
| 1
| 2
| 7
|
514
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextAttention
|
from typing import Any, Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from torch import nn
class AlignTextAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = AlignTextSelfAttention(config)
self.output = AlignTextSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
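A hypothetical pruning sketch (the tiny config values are illustrative, not a real checkpoint): removing two of four heads shrinks the query/key/value projections and the output projection accordingly.
```python
from transformers import AlignTextConfig
from transformers.models.align.modeling_align import AlignTextAttention

attn = AlignTextAttention(AlignTextConfig(hidden_size=64, num_attention_heads=4))
attn.prune_heads({0, 2})                  # drop heads 0 and 2
print(attn.self.num_attention_heads)      # 2
print(attn.self.query.out_features)       # 2 heads * 16 head_size == 32
```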
|
class AlignTextAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 14
| 1
| 1
| 0.07
| 1
| 5
| 1
| 0
| 3
| 3
| 3
| 13
| 49
| 4
| 43
| 20
| 30
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
515
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextEmbeddings
|
import torch
from torch import nn
from typing import Any, Callable, Optional, Union
class AlignTextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
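A minimal standalone sketch of the additive scheme in `forward` above, with tiny made-up sizes: word, token-type, and absolute position embeddings are summed before LayerNorm and dropout are applied.
```python
import torch
from torch import nn

vocab, max_pos, types, hidden = 10, 8, 2, 4    # illustrative sizes
word = nn.Embedding(vocab, hidden)
position = nn.Embedding(max_pos, hidden)
token_type = nn.Embedding(types, hidden)

input_ids = torch.tensor([[1, 2, 3]])
position_ids = torch.arange(3).unsqueeze(0)
token_type_ids = torch.zeros_like(input_ids)
embeddings = word(input_ids) + token_type(token_type_ids) + position(position_ids)
print(embeddings.shape)                         # torch.Size([1, 3, 4])
```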
|
class AlignTextEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
pass
| 3
| 1
| 29
| 3
| 23
| 3
| 4
| 0.15
| 1
| 3
| 0
| 0
| 2
| 6
| 2
| 12
| 62
| 8
| 47
| 23
| 37
| 7
| 34
| 16
| 31
| 7
| 1
| 2
| 8
|
516
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextEncoder
|
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging
from typing import Any, Callable, Optional, Union
from torch import nn
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndNoAttention
class AlignTextEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([AlignTextLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, **kwargs)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
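A hedged shape note on the collection loop above (`encoder` and `embedding_output` reused from the encoder's caller, `AlignTextModel.forward`): with `output_hidden_states=True`, the returned tuple holds the embedding output plus one tensor per layer.
```python
# hypothetical usage; names follow AlignTextModel.forward
out = encoder(embedding_output, output_hidden_states=True, return_dict=True)
print(len(out.hidden_states))  # config.num_hidden_layers + 1
```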
|
class AlignTextEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]:
pass
| 4
| 0
| 45
| 4
| 41
| 0
| 9
| 0
| 1
| 8
| 2
| 0
| 2
| 3
| 2
| 12
| 91
| 8
| 83
| 26
| 68
| 0
| 35
| 14
| 32
| 17
| 1
| 3
| 18
|
517
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextIntermediate
|
from ...activations import ACT2FN
import torch
from torch import nn
class AlignTextIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class AlignTextIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
518
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextLayer
|
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...modeling_layers import GradientCheckpointingLayer
from typing import Any, Callable, Optional, Union
import torch
class AlignTextLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = AlignTextAttention(config)
self.intermediate = AlignTextIntermediate(config)
self.output = AlignTextOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
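For reference, a small self-contained sketch of `apply_chunking_to_forward`, which the layer uses to trade memory for compute in the feed-forward step; for any valid chunk size the result matches the unchunked call (`ff` below is a stand-in for `feed_forward_chunk`).
```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

def ff(x):                                     # stand-in for feed_forward_chunk
    return x * 2

x = torch.randn(1, 6, 4)
out = apply_chunking_to_forward(ff, 2, 1, x)   # chunk_size=2 along seq dim 1
print(torch.allclose(out, ff(x)))              # True
```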
|
class AlignTextLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 27
| 2
| 23
| 2
| 4
| 0.1
| 1
| 7
| 3
| 0
| 3
| 8
| 3
| 13
| 84
| 9
| 70
| 32
| 57
| 7
| 41
| 23
| 37
| 7
| 1
| 2
| 11
|
519
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextModel
|
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndNoAttention
from typing import Any, Callable, Optional, Union
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging
@auto_docstring(custom_intro='\n The text model from ALIGN without any head or projection on top.\n ')
class AlignTextModel(AlignPreTrainedModel):
config: AlignTextConfig
_no_split_modules = ['AlignTextEmbeddings']
def __init__(self, config: AlignTextConfig, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = AlignTextEmbeddings(config)
self.encoder = AlignTextEncoder(config)
self.pooler = AlignTextPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutputWithPooling]:
"""
Examples:
```python
>>> from transformers import AutoTokenizer, AlignTextModel
>>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, **kwargs)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
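A conceptual sketch (not the library's exact helper) of what `get_extended_attention_mask` produces in `forward` above: a `(batch, seq)` padding mask becomes a broadcastable additive mask, zero where attention is allowed and a large negative value where it is masked.
```python
import torch

attention_mask = torch.tensor([[1, 1, 0]])                  # 0 marks padding
extended = (1.0 - attention_mask[:, None, None, :].float())
extended = extended * torch.finfo(torch.float32).min        # additive mask
print(extended.shape)                                       # torch.Size([1, 1, 1, 3])
```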
|
@auto_docstring(custom_intro='\n The text model from ALIGN without any head or projection on top.\n ')
class AlignTextModel(AlignPreTrainedModel):
def __init__(self, config: AlignTextConfig, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutputWithPooling]:
'''
Examples:
```python
>>> from transformers import AutoTokenizer, AlignTextModel
>>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```'''
pass
| 8
| 2
| 28
| 4
| 19
| 5
| 4
| 0.25
| 1
| 9
| 5
| 0
| 4
| 4
| 4
| 5
| 121
| 21
| 80
| 33
| 62
| 20
| 43
| 21
| 38
| 13
| 2
| 2
| 17
|
520
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextOutput
|
import torch
from torch import nn
class AlignTextOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class AlignTextOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
521
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextPooler
|
from torch import nn
import torch
class AlignTextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class AlignTextPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
522
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextSelfAttention
|
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from typing import Any, Callable, Optional, Union
class AlignTextSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.attention_dropout = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, head_mask=head_mask, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
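A shape-only sketch of the reshape in `forward` above, with illustrative sizes: `(batch, seq, hidden)` becomes `(batch, heads, seq, head_size)` before the attention kernel is applied.
```python
import torch

batch, seq, heads, head_size = 2, 5, 4, 16        # illustrative sizes
hidden_states = torch.randn(batch, seq, heads * head_size)
hidden_shape = (batch, seq, -1, head_size)
query_states = hidden_states.view(hidden_shape).transpose(1, 2)
print(query_states.shape)                         # torch.Size([2, 4, 5, 16])
```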
|
class AlignTextSelfAttention(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 43
| 7
| 31
| 6
| 6
| 0.19
| 1
| 5
| 0
| 0
| 3
| 11
| 3
| 13
| 132
| 22
| 93
| 44
| 80
| 18
| 72
| 35
| 68
| 13
| 1
| 2
| 17
|
523
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignTextSelfOutput
|
import torch
from torch import nn
class AlignTextSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class AlignTextSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
524
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignVisionBlock
|
import torch
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
from torch import nn
class AlignVisionBlock(nn.Module):
"""
This corresponds to the block module of the original EfficientNet vision encoder implementation.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding only to the right and bottom sides of the input before the depthwise convolution
operation; set to `True` for inputs with odd input sizes.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):
super().__init__()
self.expand_ratio = expand_ratio
self.expand = self.expand_ratio != 1
expand_in_dim = in_dim * expand_ratio
if self.expand:
self.expansion = AlignVisionExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride)
self.depthwise_conv = AlignVisionDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding)
self.squeeze_excite = AlignVisionSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand)
self.projection = AlignVisionFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
embeddings = hidden_states
if self.expand_ratio != 1:
hidden_states = self.expansion(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.squeeze_excite(hidden_states)
hidden_states = self.projection(embeddings, hidden_states)
return hidden_states
|
class AlignVisionBlock(nn.Module):
'''
This corresponds to the block module of the original EfficientNet vision encoder implementation.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding only to the right and bottom sides of the input before the depthwise convolution
operation; set to `True` for inputs with odd input sizes.
'''
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3
| 1
| 26
| 2
| 23
| 1
| 4
| 0.55
| 1
| 10
| 5
| 0
| 2
| 6
| 2
| 12
| 79
| 6
| 47
| 22
| 33
| 26
| 19
| 11
| 16
| 5
| 1
| 1
| 7
|
525
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignVisionDepthwiseConv2d
|
from torch import nn
class AlignVisionDepthwiseConv2d(nn.Conv2d):
def __init__(self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):
out_channels = in_channels * depth_multiplier
super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode)
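A minimal sketch of the `groups` trick this subclass relies on: with `groups == in_channels`, each input channel is convolved by its own `depth_multiplier` filters instead of mixing channels.
```python
import torch
from torch import nn

conv = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, groups=8, padding=1)
x = torch.randn(1, 8, 16, 16)
print(conv(x).shape)       # torch.Size([1, 8, 16, 16])
print(conv.weight.shape)   # torch.Size([8, 1, 3, 3]) -- one 3x3 filter per channel
```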
|
class AlignVisionDepthwiseConv2d(nn.Conv2d):
def __init__(self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):
pass
| 2
| 0
| 23
| 0
| 23
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 24
| 0
| 24
| 13
| 12
| 0
| 4
| 3
| 2
| 1
| 1
| 0
| 1
|
526
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignVisionDepthwiseLayer
|
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
from ...activations import ACT2FN
from torch import nn
import torch
class AlignVisionDepthwiseLayer(nn.Module):
"""
This corresponds to the depthwise convolution phase of each block in the original implementation.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool):
super().__init__()
self.stride = stride
conv_pad = 'valid' if self.stride == 2 else 'same'
padding = correct_pad(kernel_size, adjust=adjust_padding)
self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
self.depthwise_conv = AlignVisionDepthwiseConv2d(in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False)
self.depthwise_norm = nn.BatchNorm2d(num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.depthwise_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
if self.stride == 2:
hidden_states = self.depthwise_conv_pad(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.depthwise_norm(hidden_states)
hidden_states = self.depthwise_act(hidden_states)
return hidden_states
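An illustrative sketch of the stride-2 path above. `correct_pad` is not shown in this record; the `(0, 1, 0, 1)` tuple below assumes its output for `kernel_size=3` with `adjust=True`. Explicit asymmetric zero-padding followed by a `'valid'` convolution reproduces TensorFlow-style `'same'` behavior for even input sizes.
```python
import torch
from torch import nn

pad = nn.ZeroPad2d((0, 1, 0, 1))   # (left, right, top, bottom) -- assumed correct_pad output
conv = nn.Conv2d(8, 8, kernel_size=3, stride=2, padding="valid", groups=8, bias=False)
x = torch.randn(1, 8, 16, 16)
print(conv(pad(x)).shape)          # torch.Size([1, 8, 8, 8])
```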
|
class AlignVisionDepthwiseLayer(nn.Module):
'''
This corresponds to the depthwise convolution phase of each block in the original implementation.
'''
def __init__(self, config: AlignVisionConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3
| 1
| 16
| 2
| 14
| 1
| 2
| 0.14
| 1
| 6
| 2
| 0
| 2
| 5
| 2
| 12
| 37
| 5
| 28
| 17
| 18
| 4
| 17
| 10
| 14
| 2
| 1
| 1
| 4
|
527
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignVisionEmbeddings
|
from torch import nn
from ...activations import ACT2FN
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
import torch
class AlignVisionEmbeddings(nn.Module):
"""
A module that corresponds to the stem module of the original work.
"""
def __init__(self, config: AlignVisionConfig):
super().__init__()
self.out_dim = round_filters(config, 32)
self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
self.convolution = nn.Conv2d(config.num_channels, self.out_dim, kernel_size=3, stride=2, padding='valid', bias=False)
self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.activation = ACT2FN[config.hidden_act]
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
features = self.padding(pixel_values)
features = self.convolution(features)
features = self.batchnorm(features)
features = self.activation(features)
return features
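A shape sketch of the stem under the documented defaults (`image_size=600`, `num_channels=3`); the stem width of 64 assumes `round_filters(config, 32)` evaluates to 64 with `width_coefficient=2.0`.
```python
import torch
from torch import nn

pad = nn.ZeroPad2d((0, 1, 0, 1))
conv = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding="valid", bias=False)
x = torch.randn(1, 3, 600, 600)
print(conv(pad(x)).shape)  # torch.Size([1, 64, 300, 300])
```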
|
class AlignVisionEmbeddings(nn.Module):
'''
A module that corresponds to the stem module of the original work.
'''
def __init__(self, config: AlignVisionConfig):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 9
| 1
| 8
| 0
| 1
| 0.19
| 1
| 3
| 1
| 0
| 2
| 5
| 2
| 12
| 23
| 4
| 16
| 9
| 13
| 3
| 14
| 9
| 11
| 1
| 1
| 0
| 2
|
528
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py
|
transformers.models.align.modeling_align.AlignVisionEncoder
|
import torch
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndNoAttention
from typing import Any, Callable, Optional, Union
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
import math
class AlignVisionEncoder(nn.Module):
"""
Forward propagates the embeddings through each vision encoder (EfficientNet) block.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
"""
def __init__(self, config: AlignVisionConfig):
super().__init__()
self.depth_coefficient = config.depth_coefficient
def round_repeats(repeats):
return int(math.ceil(self.depth_coefficient * repeats))
num_base_blocks = len(config.in_channels)
num_blocks = sum((round_repeats(n) for n in config.num_block_repeats))
curr_block_num = 0
blocks = []
for i in range(num_base_blocks):
in_dim = round_filters(config, config.in_channels[i])
out_dim = round_filters(config, config.out_channels[i])
stride = config.strides[i]
kernel_size = config.kernel_sizes[i]
expand_ratio = config.expand_ratios[i]
for j in range(round_repeats(config.num_block_repeats[i])):
id_skip = j == 0
stride = 1 if j > 0 else stride
in_dim = out_dim if j > 0 else in_dim
adjust_padding = curr_block_num not in config.depthwise_padding
drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
block = AlignVisionBlock(config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding)
blocks.append(block)
curr_block_num += 1
self.blocks = nn.ModuleList(blocks)
def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithPoolingAndNoAttention:
all_hidden_states = (hidden_states,) if output_hidden_states else None
for block in self.blocks:
hidden_states = block(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
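A quick check of the `round_repeats` closure above under the documented defaults (`depth_coefficient=3.1`):
```python
import math

depth_coefficient = 3.1
num_block_repeats = [1, 2, 2, 3, 3, 4, 1]
rounded = [int(math.ceil(depth_coefficient * n)) for n in num_block_repeats]
print(rounded)       # [4, 7, 7, 10, 10, 13, 4]
print(sum(rounded))  # 55 blocks instantiated in total
```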
|
class AlignVisionEncoder(nn.Module):
'''
Forward propagates the embeddings through each vision encoder (EfficientNet) block.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
'''
def __init__(self, config: AlignVisionConfig):
pass
def round_repeats(repeats):
pass
def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
| 4 | 1 | 22 | 3 | 18 | 1 | 4 | 0.13 | 1 | 9 | 4 | 0 | 2 | 2 | 2 | 12 | 72 | 12 | 53 | 28 | 44 | 7 | 35 | 23 | 31 | 7 | 1 | 2 | 13 |
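The depth scaling in `__init__` is plain ceiling arithmetic plus a linear stochastic-depth schedule. A standalone sketch of that bookkeeping, using illustrative values rather than a real config:

```python
import math

depth_coefficient = 2.0        # illustrative, not a real config value
num_block_repeats = [1, 2, 2]
drop_connect_rate = 0.2

def round_repeats(repeats):
    # each base block is repeated ceil(depth_coefficient * repeats) times
    return int(math.ceil(depth_coefficient * repeats))

num_blocks = sum(round_repeats(n) for n in num_block_repeats)
# the drop rate grows linearly with the global block index
drop_rates = [drop_connect_rate * i / num_blocks for i in range(num_blocks)]
print(num_blocks)              # 10
print([round(r, 2) for r in drop_rates])
```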
529 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py | transformers.models.align.modeling_align.AlignVisionExpansionLayer |
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
import torch
from torch import nn
from ...activations import ACT2FN
class AlignVisionExpansionLayer(nn.Module):
"""
This corresponds to the expansion phase of each block in the original implementation.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
super().__init__()
self.expand_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False)
self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
self.expand_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
hidden_states = self.expand_conv(hidden_states)
hidden_states = self.expand_bn(hidden_states)
hidden_states = self.expand_act(hidden_states)
return hidden_states
|
class AlignVisionExpansionLayer(nn.Module):
'''
This corresponds to the expansion phase of each block in the original implementation.
'''
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 1 | 9 | 1 | 8 | 1 | 1 | 0.24 | 1 | 4 | 1 | 0 | 2 | 3 | 2 | 12 | 24 | 3 | 17 | 6 | 14 | 4 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
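In the surrounding block (defined elsewhere in the file), the expanded width conventionally follows the EfficientNet rule `expand_dim = in_dim * expand_ratio`; that rule is an assumption here, shown as a toy computation:

```python
# assumed EfficientNet-style widening; the numbers are illustrative
in_dim, expand_ratio = 24, 6
expand_dim = in_dim * expand_ratio
print(expand_dim)  # 144 channels between the 1x1 expand and project convs
```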
530 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py | transformers.models.align.modeling_align.AlignVisionFinalBlockLayer |
from torch import nn
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
import torch
class AlignVisionFinalBlockLayer(nn.Module):
"""
This corresponds to the final phase of each block in the original implementation.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool):
super().__init__()
self.apply_dropout = stride == 1 and (not id_skip)
self.project_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False)
self.project_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.dropout = nn.Dropout(p=drop_rate)
def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
    hidden_states = self.project_conv(hidden_states)
    hidden_states = self.project_bn(hidden_states)
    if self.apply_dropout:
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + embeddings
    return hidden_states
|
class AlignVisionFinalBlockLayer(nn.Module):
'''
This corresponds to the final phase of each block in the original implementation.
'''
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool):
pass
def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 1 | 13 | 1 | 12 | 0 | 2 | 0.13 | 1 | 6 | 1 | 0 | 2 | 4 | 2 | 12 | 31 | 4 | 24 | 9 | 19 | 3 | 14 | 7 | 11 | 2 | 1 | 1 | 3 |
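A toy check of why the residual add must stay behind the `apply_dropout` guard: once the projection changes the channel count (or an earlier stride changed the resolution), the skip shapes no longer match:

```python
import torch
from torch import nn

proj = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1)
x = torch.randn(1, 32, 28, 28)
y = proj(x)
print(y.shape)  # torch.Size([1, 64, 28, 28])
# x + y would fail here: channel dims 32 vs 64 do not broadcast
```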
531 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py | transformers.models.align.modeling_align.AlignVisionModel |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndNoAttention
import torch
from torch import nn
from typing import Any, Callable, Optional, Union
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging
@auto_docstring(custom_intro='\n The vision model from ALIGN without any head or projection on top.\n ')
class AlignVisionModel(AlignPreTrainedModel):
config: AlignVisionConfig
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
def __init__(self, config: AlignVisionConfig):
super().__init__(config)
self.config = config
self.embeddings = AlignVisionEmbeddings(config)
self.encoder = AlignVisionEncoder(config)
if config.pooling_type == 'mean':
self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
elif config.pooling_type == 'max':
self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
else:
raise ValueError(f"config.pooling_type must be one of ['mean', 'max'] got {config.pooling_type}")
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.embeddings.convolution
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AlignVisionModel
>>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled features
```"""
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=True)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
pooled_output = pooled_output.reshape(pooled_output.shape[:2])
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring(custom_intro='\n The vision model from ALIGN without any head or projection on top.\n ')
class AlignVisionModel(AlignPreTrainedModel):
def __init__(self, config: AlignVisionConfig):
pass
def get_input_embeddings(self) -> nn.Module:
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
'''
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AlignVisionModel
>>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled features
```'''
pass
| 7 | 1 | 25 | 4 | 14 | 7 | 3 | 0.42 | 1 | 7 | 4 | 0 | 3 | 4 | 3 | 4 | 83 | 15 | 48 | 21 | 37 | 20 | 29 | 15 | 25 | 5 | 2 | 1 | 9 |
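A minimal sketch of the pooling step at the end of `forward`, assuming a pooling window that matches a 7x7 final feature map:

```python
import torch
from torch import nn

features = torch.randn(2, 1280, 7, 7)      # (batch, channels, h, w)
pooler = nn.AvgPool2d(7, ceil_mode=True)   # collapses the map to 1x1
pooled = pooler(features)                  # (2, 1280, 1, 1)
pooled = pooled.reshape(pooled.shape[:2])  # drop the spatial axes
print(pooled.shape)                        # torch.Size([2, 1280])
```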
532 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/align/modeling_align.py | transformers.models.align.modeling_align.AlignVisionSqueezeExciteLayer |
from torch import nn
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
import torch
from ...activations import ACT2FN
class AlignVisionSqueezeExciteLayer(nn.Module):
"""
This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool=False):
super().__init__()
self.dim = expand_dim if expand else in_dim
self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
self.reduce = nn.Conv2d(in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding='same')
self.expand = nn.Conv2d(in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding='same')
self.act_reduce = ACT2FN[config.hidden_act]
self.act_expand = nn.Sigmoid()
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
inputs = hidden_states
hidden_states = self.squeeze(hidden_states)
hidden_states = self.reduce(hidden_states)
hidden_states = self.act_reduce(hidden_states)
hidden_states = self.expand(hidden_states)
hidden_states = self.act_expand(hidden_states)
hidden_states = torch.mul(inputs, hidden_states)
return hidden_states
|
class AlignVisionSqueezeExciteLayer(nn.Module):
'''
This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
'''
def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool=False):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 1 | 16 | 2 | 14 | 0 | 2 | 0.1 | 1 | 5 | 1 | 0 | 2 | 7 | 2 | 12 | 37 | 5 | 29 | 11 | 26 | 3 | 19 | 11 | 16 | 2 | 1 | 0 | 3 |
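The squeeze width is `max(1, int(in_dim * squeeze_expansion_ratio))`. A self-contained sketch of the whole squeeze-excite flow with illustrative sizes (ReLU stands in for the configured activation):

```python
import torch
from torch import nn

in_dim, ratio = 96, 0.25
dim_se = max(1, int(in_dim * ratio))           # 24: a 4x channel squeeze

x = torch.randn(1, in_dim, 14, 14)
s = nn.AdaptiveAvgPool2d(1)(x)                 # squeeze: (1, 96, 1, 1)
s = nn.Conv2d(in_dim, dim_se, 1)(s).relu()     # reduce to dim_se channels
s = nn.Conv2d(dim_se, in_dim, 1)(s).sigmoid()  # expand back, gate in (0, 1)
out = x * s                                    # reweight input channels
print(out.shape)                               # torch.Size([1, 96, 14, 14])
```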
533 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/align/processing_align.py | transformers.models.align.processing_align.AlignProcessor |
from ...processing_utils import ProcessingKwargs, ProcessorMixin
class AlignProcessor(ProcessorMixin):
"""
Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and
[`BertTokenizer`]/[`BertTokenizerFast`] into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~AlignProcessor.__call__`] and [`~AlignProcessor.decode`] for more
information.
The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
```python
from transformers import AlignProcessor
from PIL import Image
model_id = "kakaobrain/align-base"
processor = AlignProcessor.from_pretrained(model_id)
processor(
images=your_pil_image,
text=["What is that?"],
images_kwargs = {"crop_size": {"height": 224, "width": 224}},
text_kwargs = {"padding": "do_not_pad"},
common_kwargs = {"return_tensors": "pt"},
)
```
Args:
image_processor ([`EfficientNetImageProcessor`]):
The image processor is a required input.
tokenizer ([`BertTokenizer`, `BertTokenizerFast`]):
The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'EfficientNetImageProcessor'
tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
valid_processor_kwargs = AlignProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
|
class AlignProcessor(ProcessorMixin):
'''
Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and
[`BertTokenizer`]/[`BertTokenizerFast`] into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~AlignProcessor.__call__`] and [`~AlignProcessor.decode`] for more
information.
The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
```python
from transformers import AlignProcessor
from PIL import Image
model_id = "kakaobrain/align-base"
processor = AlignProcessor.from_pretrained(model_id)
processor(
images=your_pil_image,
text=["What is that?"],
images_kwargs = {"crop_size": {"height": 224, "width": 224}},
text_kwargs = {"padding": "do_not_pad"},
common_kwargs = {"return_tensors": "pt"},
)
```
Args:
image_processor ([`EfficientNetImageProcessor`]):
The image processor is a required input.
tokenizer ([`BertTokenizer`, `BertTokenizerFast`]):
The tokenizer is a required input.
'''
def __init__(self, image_processor, tokenizer):
pass
| 2 | 1 | 17 | 1 | 8 | 8 | 2 | 1.45 | 1 | 6 | 2 | 0 | 5 | 0 | 5 | 22 | 123 | 15 | 44 | 23 | 30 | 64 | 30 | 15 | 24 | 7 | 2 | 1 | 11 |
534 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/align/processing_align.py | transformers.models.align.processing_align.AlignProcessorKwargs |
from ...processing_utils import ProcessingKwargs, ProcessorMixin
class AlignProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {'text_kwargs': {'padding': 'max_length', 'max_length': 64}}
|
class AlignProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0 | 7 | 2 | 6 | 1 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
535 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/configuration_altclip.py | transformers.models.altclip.configuration_altclip.AltCLIPConfig |
from ...configuration_utils import PretrainedConfig
class AltCLIPConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`AltCLIPTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`AltCLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 768):
Dimensionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import AltCLIPConfig, AltCLIPModel
>>> # Initializing a AltCLIPConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPConfig()
>>> # Initializing a AltCLIPModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a AltCLIPConfig from a AltCLIPTextConfig and a AltCLIPVisionConfig
>>> # Initializing a AltCLIPText and AltCLIPVision configuration
>>> config_text = AltCLIPTextConfig()
>>> config_vision = AltCLIPVisionConfig()
>>> config = AltCLIPConfig.from_text_vision_configs(config_text, config_vision)
```"""
model_type = 'altclip'
sub_configs = {'text_config': AltCLIPTextConfig, 'vision_config': AltCLIPVisionConfig}
def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
text_config_dict = kwargs.pop('text_config_dict', None)
vision_config_dict = kwargs.pop('vision_config_dict', None)
super().__init__(**kwargs)
if text_config_dict is not None:
if text_config is None:
text_config = {}
_text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and (key not in ['transformers_version']):
if key in text_config_dict:
message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.'
else:
message = f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The value `text_config["{key}"]` will be overridden.'
logger.info(message)
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
_vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
if 'id2label' in _vision_config_dict:
_vision_config_dict['id2label'] = {str(key): value for key, value in _vision_config_dict['id2label'].items()}
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']):
if key in vision_config_dict:
message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.'
else:
message = f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
logger.info(message)
vision_config.update(_vision_config_dict)
if text_config is None:
text_config = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.')
if vision_config is None:
vision_config = {}
logger.info('`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.')
self.text_config = AltCLIPTextConfig(**text_config)
self.vision_config = AltCLIPVisionConfig(**vision_config)
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
|
class AltCLIPConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`AltCLIPTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`AltCLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 768):
Dimensionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import AltCLIPConfig, AltCLIPModel
>>> # Initializing a AltCLIPConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPConfig()
>>> # Initializing a AltCLIPModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a AltCLIPConfig from a AltCLIPTextConfig and a AltCLIPVisionConfig
>>> # Initializing a AltCLIPText and AltCLIPVision configuration
>>> config_text = AltCLIPTextConfig()
>>> config_vision = AltCLIPVisionConfig()
>>> config = AltCLIPConfig.from_text_vision_configs(config_text, config_vision)
```'''
def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
pass
| 2 | 1 | 49 | 8 | 30 | 12 | 8 | 0.89 | 1 | 4 | 2 | 0 | 1 | 5 | 2 | 2 | 147 | 28 | 63 | 19 | 57 | 56 | 44 | 16 | 41 | 14 | 1 | 4 | 15 |
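A small check of the precedence implemented above: on conflicting keys, values coming through `text_config_dict` override those in `text_config` (the same holds for the vision pair):

```python
from transformers import AltCLIPConfig

config = AltCLIPConfig(
    text_config={"hidden_size": 512},
    text_config_dict={"hidden_size": 1024},  # wins on conflict
)
print(config.text_config.hidden_size)  # 1024
```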
536 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/configuration_altclip.py | transformers.models.altclip.configuration_altclip.AltCLIPTextConfig |
from ...configuration_utils import PretrainedConfig
class AltCLIPTextConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a
AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250002):
Vocabulary size of the AltCLIP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`AltCLIPTextModel`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 0.02):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token.
bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token.
eos_token_id (`Union[int, list[int]]`, *optional*, defaults to 2):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
project_dim (`int`, *optional*, defaults to 768):
The dimensions of the teacher model before the mapping layer.
Examples:
```python
>>> from transformers import AltCLIPTextModel, AltCLIPTextConfig
>>> # Initializing a AltCLIPTextConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPTextConfig()
>>> # Initializing a AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'altclip_text_model'
def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, project_dim=768, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.project_dim = project_dim
|
class AltCLIPTextConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a
AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250002):
Vocabulary size of the AltCLIP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`AltCLIPTextModel`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 0.02):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token.
bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token.
eos_token_id (`Union[int, list[int]]`, *optional*, defaults to 2):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
project_dim (`int`, *optional*, defaults to 768):
The dimensions of the teacher model before the mapping layer.
Examples:
```python
>>> from transformers import AltCLIPTextModel, AltCLIPTextConfig
>>> # Initializing a AltCLIPTextConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPTextConfig()
>>> # Initializing a AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, project_dim=768, **kwargs):
pass
| 2 | 1 | 41 | 1 | 40 | 0 | 1 | 1.5 | 1 | 1 | 0 | 0 | 1 | 16 | 1 | 1 | 116 | 11 | 42 | 41 | 18 | 63 | 20 | 19 | 18 | 1 | 1 | 0 | 1 |
537 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/configuration_altclip.py | transformers.models.altclip.configuration_altclip.AltCLIPVisionConfig |
from ...configuration_utils import PretrainedConfig
class AltCLIPVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import AltCLIPVisionConfig, AltCLIPVisionModel
>>> # Initializing a AltCLIPVisionConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPVisionConfig()
>>> # Initializing a AltCLIPVisionModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'altclip_vision_model'
base_config_key = 'vision_config'
def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
|
class AltCLIPVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import AltCLIPVisionConfig, AltCLIPVisionModel
>>> # Initializing a AltCLIPVisionConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPVisionConfig()
>>> # Initializing a AltCLIPVisionModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
pass
| 2 | 1 | 32 | 1 | 31 | 0 | 1 | 1.35 | 1 | 1 | 0 | 0 | 1 | 13 | 1 | 1 | 91 | 11 | 34 | 33 | 16 | 46 | 18 | 17 | 16 | 1 | 1 | 0 | 1 |
538 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPAttention |
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Any, Callable, Optional, Union
import torch
import torch.nn as nn
class AltCLIPAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
if self.config._attn_implementation != 'flash_attention_2':
if attention_mask is not None and causal_attention_mask is not None:
attention_mask = attention_mask + causal_attention_mask
elif causal_attention_mask is not None:
attention_mask = causal_attention_mask
else:
self.is_causal = causal_attention_mask is not None
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
|
class AltCLIPAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3 | 2 | 32 | 5 | 25 | 2 | 4 | 0.11 | 1 | 5 | 0 | 0 | 3 | 10 | 3 | 13 | 102 | 19 | 75 | 30 | 65 | 8 | 54 | 24 | 50 | 8 | 1 | 2 | 11 |
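`eager_attention_forward` is defined elsewhere in the file; the following is a minimal self-contained sketch of the math it stands for, assuming inputs already shaped `(batch, heads, seq, head_dim)`:

```python
import torch

def eager_attention_sketch(q, k, v, scaling, attn_mask=None):
    # scaled dot-product scores: (batch, heads, seq, seq)
    scores = torch.matmul(q, k.transpose(-1, -2)) * scaling
    if attn_mask is not None:
        scores = scores + attn_mask  # additive mask with large negatives
    weights = scores.softmax(dim=-1)
    return torch.matmul(weights, v), weights

q = k = v = torch.randn(2, 8, 16, 64)
out, weights = eager_attention_sketch(q, k, v, scaling=64 ** -0.5)
print(out.shape, weights.shape)  # (2, 8, 16, 64) and (2, 8, 16, 16)
```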
539 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPEncoder |
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
from typing import Any, Callable, Optional, Union
import torch.nn as nn
import torch
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
class AltCLIPEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`AltCLIPEncoderLayer`].
Args:
config: AltCLIPConfig
"""
def __init__(self, config: AltCLIPConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class AltCLIPEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`AltCLIPEncoderLayer`].
Args:
config: AltCLIPConfig
'''
def __init__(self, config: AltCLIPConfig):
pass
@can_return_tuple
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 4 | 2 | 43 | 5 | 25 | 13 | 7 | 0.61 | 1 | 9 | 3 | 0 | 2 | 3 | 2 | 12 | 95 | 13 | 51 | 19 | 40 | 31 | 27 | 11 | 24 | 12 | 1 | 2 | 13 |
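A quick way to see the bookkeeping in `forward`: with `output_hidden_states=True` the output holds the input embeddings plus one state per layer. A sketch assuming the encoder also accepts an `AltCLIPVisionConfig` (as the vision transformer uses it) and that the class is importable from the modeling module:

```python
import torch
from transformers import AltCLIPVisionConfig
from transformers.models.altclip.modeling_altclip import AltCLIPEncoder

config = AltCLIPVisionConfig(
    num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128
)
encoder = AltCLIPEncoder(config)

inputs_embeds = torch.randn(1, 5, config.hidden_size)
out = encoder(inputs_embeds, output_hidden_states=True)
print(len(out.hidden_states))  # 3: the embeddings plus one entry per layer
```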
540 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer |
import torch.nn as nn
from typing import Any, Callable, Optional, Union
import torch
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
from ...modeling_layers import GradientCheckpointingLayer
class AltCLIPEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: AltCLIPConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = AltCLIPAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = AltCLIPMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class AltCLIPEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: AltCLIPConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3 | 1 | 23 | 3 | 16 | 5 | 2 | 0.31 | 1 | 6 | 3 | 0 | 2 | 5 | 2 | 12 | 48 | 6 | 32 | 17 | 23 | 10 | 21 | 11 | 18 | 2 | 1 | 1 | 3 |
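The layer is the standard pre-LayerNorm residual pattern; a stripped-down sketch with identity stand-ins for the two sublayers:

```python
import torch
from torch import nn

dim = 16
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.Identity()  # stand-in for AltCLIPAttention
mlp = nn.Identity()   # stand-in for AltCLIPMLP

x = torch.randn(2, 5, dim)
x = x + attn(norm1(x))  # residual around normalized self-attention
x = x + mlp(norm2(x))   # residual around normalized feed-forward
print(x.shape)          # torch.Size([2, 5, 16])
```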
541 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPMLP |
import torch.nn as nn
from ...activations import ACT2FN
import torch
class AltCLIPMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class AltCLIPMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 13 | 1 | 12 | 7 | 9 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2 |
542 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPModel |
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch.nn as nn
import torch
from typing import Any, Callable, Optional, Union
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
class AltCLIPModel(AltCLIPPreTrainedModel):
config: AltCLIPConfig
def __init__(self, config: AltCLIPConfig):
super().__init__(config)
if not isinstance(config.vision_config, AltCLIPVisionConfig):
raise TypeError(f'config.vision_config is expected to be of type AltCLIPVisionConfig but is of type {type(config.vision_config)}.')
if not isinstance(config.text_config, AltCLIPTextConfig):
raise TypeError(f'config.text_config is expected to be of type AltCLIPTextConfig but is of type {type(config.text_config)}.')
text_config = config.text_config
vision_config = config.vision_config
vision_config._attn_implementation = config._attn_implementation
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.project_dim
self.vision_embed_dim = vision_config.hidden_size
self.text_model = AltCLIPTextModel(text_config)
self.vision_model = AltCLIPVisionTransformer(vision_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AltCLIPTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AltCLIPModel
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids)
pooled_output = text_outputs.pooler_output
text_features = self.text_projection(pooled_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AltCLIPModel
>>> from transformers.image_utils import load_image
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
pooled_output = vision_outputs.pooler_output
image_features = self.visual_projection(pooled_output)
return image_features
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, AltCLIPOutput]:
"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AltCLIPModel
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.T
loss = None
if return_loss:
loss = clip_loss(logits_per_text)
if not return_dict:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return (loss,) + output if loss is not None else output
return AltCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
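# A minimal sketch (an assumption, not code from this file) of the symmetric
# contrastive objective that `clip_loss` above is expected to implement:
# cross-entropy over the similarity matrix in both directions, averaged.
import torch
import torch.nn.functional as F

def _contrastive_loss_sketch(logits: torch.Tensor) -> torch.Tensor:
    # Row i of `logits` should score its own pair highest, so the target is i.
    return F.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

def _clip_loss_sketch(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = _contrastive_loss_sketch(similarity)    # text -> image direction
    image_loss = _contrastive_loss_sketch(similarity.t())  # image -> text direction
    return (caption_loss + image_loss) / 2.0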
|
class AltCLIPModel(AltCLIPPreTrainedModel):
def __init__(self, config: AltCLIPConfig):
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
'''
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AltCLIPTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AltCLIPModel
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```'''
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
'''
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AltCLIPModel
>>> from transformers.image_utils import load_image
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, AltCLIPOutput]:
'''
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AltCLIPModel
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```'''
pass
| 10 | 3 | 53 | 7 | 33 | 13 | 5 | 0.39 | 1 | 11 | 6 | 0 | 4 | 8 | 4 | 5 | 222 | 33 | 137 | 62 | 100 | 53 | 57 | 31 | 52 | 7 | 2 | 1 | 18 |
| 543 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPOutput |
from dataclasses import dataclass
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch.nn as nn
@dataclass
@auto_docstring
class AltCLIPOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`AltCLIPTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`AltCLIPVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
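# Note on `to_tuple` above: tensor fields are returned as-is, while the nested
# `text_model_output` / `vision_model_output` are recursively converted with
# their own `to_tuple`, so no ModelOutput instance survives in the result
# (and, as with any ModelOutput, fields left as None are typically omitted
# from `keys()`).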
|
@dataclass
@auto_docstring
class AltCLIPOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`AltCLIPTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`AltCLIPVisionModel`].
'''
def to_tuple(self) -> tuple[Any]:
pass
| 4 | 1 | 5 | 0 | 5 | 0 | 2 | 1.46 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 1 | 34 | 2 | 13 | 9 | 11 | 19 | 10 | 9 | 8 | 2 | 1 | 0 | 2 |
| 544 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPPreTrainedModel |
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch.nn as nn
@auto_docstring
class AltCLIPPreTrainedModel(PreTrainedModel):
config: AltCLIPConfig
base_model_prefix = 'altclip'
supports_gradient_checkpointing = True
_no_split_modules = []
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, AltCLIPVisionEmbeddings):
factor = self.config.initializer_factor
nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor)
nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, AltCLIPAttention):
factor = self.config.initializer_factor
in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor
out_proj_std = module.embed_dim ** (-0.5) * factor
nn.init.normal_(module.q_proj.weight, std=in_proj_std)
nn.init.normal_(module.k_proj.weight, std=in_proj_std)
nn.init.normal_(module.v_proj.weight, std=in_proj_std)
nn.init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, AltCLIPMLP):
factor = self.config.initializer_factor
in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor
fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor
nn.init.normal_(module.fc1.weight, std=fc_std)
nn.init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, AltCLIPModel):
nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * self.config.initializer_factor)
module.text_projection._is_hf_initialized = True
nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * self.config.initializer_factor)
module.visual_projection._is_hf_initialized = True
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
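# Worked numbers for the scaled init above (illustrative values only): with
# embed_dim = 768, num_hidden_layers = 12 and initializer_factor = 1.0,
#   in_proj_std  = 768 ** -0.5 * (2 * 12) ** -0.5 ≈ 0.0361 * 0.2041 ≈ 0.0074
#   out_proj_std = 768 ** -0.5                    ≈ 0.0361
# The extra (2 * num_layers) ** -0.5 factor on the input projections is the
# usual GPT-2-style depth scaling that keeps residual-stream variance roughly
# stable as layers accumulate.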
|
@auto_docstring
class AltCLIPPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 44 | 0 | 43 | 1 | 10 | 0.1 | 1 | 4 | 4 | 4 | 1 | 0 | 1 | 1 | 55 | 2 | 48 | 10 | 46 | 5 | 36 | 10 | 34 | 10 | 1 | 2 | 10 |
| 545 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPTextModel |
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
from typing import Any, Callable, Optional, Union
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch.nn as nn
class AltCLIPTextModel(AltCLIPPreTrainedModel):
config: AltCLIPTextConfig
def __init__(self, config):
super().__init__(config)
self.roberta = AltRobertaModel(config, add_pooling_layer=False)
self.transformation = nn.Linear(config.hidden_size, config.project_dim)
self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.roberta.embeddings.word_embeddings
def set_input_embeddings(self, value: nn.Embedding) -> None:
self.roberta.embeddings.word_embeddings = value
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding:
return super().resize_token_embeddings(new_num_tokens)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndProjection]:
"""
Examples:
```python
>>> from transformers import AutoProcessor, AltCLIPTextModel
>>> model = AltCLIPTextModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> texts = ["it's a cat", "it's a dog"]
>>> inputs = processor(text=texts, padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = outputs[0]
sequence_output = self.pre_LN(sequence_output)
projection_state = self.transformation(sequence_output)
pooler_output = projection_state[:, 0]
return BaseModelOutputWithPoolingAndProjection(last_hidden_state=projection_state, pooler_output=pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
class AltCLIPTextModel(AltCLIPPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self) -> nn.Module:
pass
def set_input_embeddings(self, value: nn.Embedding) -> None:
pass
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding:
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndProjection]:
'''
Examples:
```python
>>> from transformers import AutoProcessor, AltCLIPTextModel
>>> model = AltCLIPTextModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> texts = ["it's a cat", "it's a dog"]
>>> inputs = processor(text=texts, padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```'''
pass
| 8 | 1 | 16 | 3 | 10 | 3 | 1 | 0.29 | 1 | 6 | 2 | 0 | 5 | 3 | 5 | 6 | 90 | 18 | 56 | 28 | 35 | 16 | 24 | 14 | 18 | 3 | 2 | 1 | 7 |
| 546 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPVisionEmbeddings |
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch.nn as nn
class AltCLIPVisionEmbeddings(nn.Module):
def __init__(self, config: AltCLIPVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on
higher-resolution images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
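# Shape walk-through for `interpolate_pos_encoding` above, with assumed toy
# numbers: a grid pre-trained as 16x16 patches (e.g. image_size 224, patch
# size 14) resized for a 24x24 patch grid (e.g. a 336x336 input).
import torch
import torch.nn as nn

patch_pos = torch.randn(1, 256, 64)                               # 16*16 positions, dim 64
grid = patch_pos.reshape(1, 16, 16, 64).permute(0, 3, 1, 2)       # (1, 64, 16, 16)
resized = nn.functional.interpolate(grid, size=(24, 24), mode="bicubic", align_corners=False)
flat = resized.permute(0, 2, 3, 1).reshape(1, -1, 64)             # (1, 576, 64)
print(flat.shape)                                                 # torch.Size([1, 576, 64])
# The class-token position embedding is then concatenated back in front.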
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}).")
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
|
class AltCLIPVisionEmbeddings(nn.Module):
def __init__(self, config: AltCLIPVisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings so that the model can be used on
higher-resolution images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
pass
| 4 | 1 | 26 | 5 | 19 | 3 | 2 | 0.16 | 1 | 5 | 1 | 0 | 3 | 9 | 3 | 13 | 81 | 16 | 57 | 27 | 53 | 9 | 43 | 27 | 39 | 3 | 1 | 1 | 6 |
| 547 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPVisionModel |
import torch.nn as nn
import torch
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
from typing import Any, Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
class AltCLIPVisionModel(AltCLIPPreTrainedModel):
config: AltCLIPVisionConfig
main_input_name = 'pixel_values'
def __init__(self, config: AltCLIPVisionConfig):
super().__init__(config)
self.vision_model = AltCLIPVisionTransformer(config)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AltCLIPVisionModel
>>> model = AltCLIPVisionModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
|
class AltCLIPVisionModel(AltCLIPPreTrainedModel):
def __init__(self, config: AltCLIPVisionConfig):
pass
def get_input_embeddings(self) -> nn.Module:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AltCLIPVisionModel
>>> model = AltCLIPVisionModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```'''
pass
| 5 | 1 | 15 | 2 | 7 | 6 | 1 | 0.63 | 1 | 5 | 3 | 0 | 3 | 1 | 3 | 4 | 54 | 10 | 27 | 15 | 14 | 17 | 12 | 7 | 8 | 2 | 2 | 0 | 4 |
| 548 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer |
from typing import Any, Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
import torch.nn as nn
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch
class AltCLIPVisionTransformer(nn.Module):
def __init__(self, config: AltCLIPVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = AltCLIPVisionEmbeddings(config)
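# NB: "pre_layrnorm" below is the attribute name as released; the misspelling
# appears to be kept deliberately so existing checkpoint weights still load.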
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = AltCLIPEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
class AltCLIPVisionTransformer(nn.Module):
def __init__(self, config: AltCLIPVisionConfig):
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False) -> Union[tuple, BaseModelOutputWithPooling]:
pass
| 5 | 0 | 27 | 4 | 21 | 2 | 4 | 0.07 | 1 | 7 | 4 | 0 | 2 | 5 | 2 | 12 | 57 | 9 | 45 | 21 | 33 | 3 | 24 | 13 | 21 | 6 | 1 | 1 | 7 |
| 549 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaAttention |
from typing import Any, Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch.nn as nn
import torch
class AltRobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = ALT_ROBERTA_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type)
self.output = AltRobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class AltRobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4 | 0 | 15 | 1 | 14 | 1 | 1 | 0.07 | 1 | 5 | 1 | 0 | 3 | 3 | 3 | 13 | 49 | 4 | 43 | 20 | 30 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
| 550 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaEmbeddings |
import torch.nn as nn
import torch
from typing import Any, Callable, Optional, Union
class AltRobertaEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
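# Worked example of `create_position_ids_from_input_ids` above (toy ids;
# padding_idx is assumed to be 1, RoBERTa's default):
import torch

input_ids = torch.tensor([[5, 6, 7, 1, 1]])                 # 1 marks padding
mask = input_ids.ne(1).int()                                # [[1, 1, 1, 0, 0]]
position_ids = torch.cumsum(mask, dim=1).type_as(mask) * mask + 1
print(position_ids)                                         # tensor([[2, 3, 4, 1, 1]])
# Real tokens count up from padding_idx + 1; every padding slot maps back to
# padding_idx itself.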
|
class AltRobertaEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
'''
pass
| 7 | 3 | 26 | 3 | 18 | 5 | 3 | 0.32 | 1 | 1 | 0 | 0 | 3 | 7 | 3 | 13 | 87 | 13 | 56 | 23 | 50 | 18 | 43 | 21 | 39 | 8 | 1 | 2 | 10 |
| 551 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaEncoder |
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Callable, Optional, Union
import torch.nn as nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
import torch
class AltRobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([AltRobertaLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, **kwargs)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
|
class AltRobertaEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]:
pass
| 4 | 0 | 45 | 4 | 41 | 0 | 9 | 0 | 1 | 8 | 2 | 0 | 2 | 3 | 2 | 12 | 91 | 8 | 83 | 26 | 68 | 0 | 35 | 14 | 32 | 17 | 1 | 3 | 18 |
| 552 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaIntermediate |
import torch.nn as nn
from ...activations import ACT2FN
import torch
class AltRobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class AltRobertaIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
| 553 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaLayer |
import torch
import torch.nn as nn
from typing import Any, Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...modeling_layers import GradientCheckpointingLayer
class AltRobertaLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = AltRobertaAttention(config)
self.intermediate = AltRobertaIntermediate(config)
self.output = AltRobertaOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
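# Note on `apply_chunking_to_forward` above: when chunk_size_feed_forward > 0
# the attention output (batch, seq_len, hidden) is split along seq_len
# (self.seq_len_dim == 1) into chunks of that size, `feed_forward_chunk` runs
# on each chunk, and the pieces are concatenated back together. This trades a
# little compute for lower peak activation memory; with chunk size 0 it is a
# plain pass-through call.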
|
class AltRobertaLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4 | 0 | 27 | 2 | 23 | 2 | 4 | 0.1 | 1 | 7 | 3 | 0 | 3 | 8 | 3 | 13 | 84 | 9 | 70 | 32 | 57 | 7 | 41 | 23 | 37 | 7 | 1 | 2 | 11 |
| 554 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaModel |
from typing import Any, Callable, Optional, Union
from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection
import torch
import torch.nn as nn
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
@auto_docstring(custom_intro='\n The model behaves as an encoder following the architecture described in *Attention is\n all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n .. _*Attention is all you need*: https://huggingface.co/papers/1706.03762\n ')
class AltRobertaModel(AltCLIPPreTrainedModel):
config: AltCLIPTextConfig
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = AltRobertaEmbeddings(config)
self.encoder = AltRobertaEncoder(config)
self.pooler = AltRobertaPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
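# Hypothetical usage sketch (layer/head indices invented for illustration):
#   model.prune_heads({0: [0, 2], 3: [1]})
# would drop heads 0 and 2 of layer 0 and head 1 of layer 3; each affected
# AltRobertaAttention then shrinks its query/key/value/output projections
# (see AltRobertaAttention.prune_heads earlier in this file).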
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring(custom_intro='\n The model behaves as an encoder following the architecture described in *Attention is\n all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n .. _*Attention is all you need*: https://huggingface.co/papers/1706.03762\n ')
class AltRobertaModel(AltCLIPPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
pass
| 8 | 2 | 30 | 3 | 20 | 7 | 5 | 0.44 | 1 | 8 | 4 | 0 | 5 | 4 | 5 | 6 | 175 | 27 | 103 | 41 | 82 | 45 | 56 | 26 | 50 | 18 | 2 | 2 | 24 |
| 555 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaOutput |
import torch.nn as nn
import torch
class AltRobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class AltRobertaOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
| 556 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaPooler |
import torch.nn as nn
import torch
class AltRobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class AltRobertaPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2 |
| 557 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaSelfAttention |
import torch.nn as nn
from typing import Any, Callable, Optional, Union
import torch
import math
class AltRobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
query_length, key_length = (query_layer.shape[2], key_layer.shape[2])
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
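# Worked example of the relative-position branch above, with toy lengths
# query_length = key_length = 4:
#   distance = position_ids_l - position_ids_r =
#       [[ 0, -1, -2, -3],
#        [ 1,  0, -1, -2],
#        [ 2,  1,  0, -1],
#        [ 3,  2,  1,  0]]
# Adding max_position_embeddings - 1 shifts every entry into the range
# [0, 2 * max_position_embeddings - 2], the valid index range of
# `distance_embedding`, so each query/key offset selects its own learned vector.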
|
class AltRobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3 | 0 | 43 | 7 | 31 | 6 | 6 | 0.19 | 1 | 5 | 0 | 0 | 3 | 11 | 3 | 13 | 132 | 22 | 93 | 44 | 80 | 18 | 72 | 35 | 68 | 13 | 1 | 2 | 17 |
| 558 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/modeling_altclip.py | transformers.models.altclip.modeling_altclip.AltRobertaSelfOutput |
import torch.nn as nn
import torch
class AltRobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class AltRobertaSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
| 559 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/altclip/processing_altclip.py | transformers.models.altclip.processing_altclip.AltCLIPProcessor |
from ...processing_utils import ProcessorMixin
from ...utils.deprecation import deprecate_kwarg
class AltCLIPProcessor(ProcessorMixin):
"""
Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single
processor.
[`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See
the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information.
Args:
image_processor ([`CLIPImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`XLMRobertaTokenizerFast`], *optional*):
The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = ('CLIPImageProcessor', 'CLIPImageProcessorFast')
tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
@deprecate_kwarg(old_name='feature_extractor', version='5.0.0', new_name='image_processor')
def __init__(self, image_processor=None, tokenizer=None):
super().__init__(image_processor, tokenizer)
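# Minimal usage sketch, mirroring the checkpoint name used in the examples
# earlier in this file:
#   from transformers import AltCLIPProcessor
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image,
#                      return_tensors="pt", padding=True)
# `inputs` then holds both tokenizer outputs (input_ids, attention_mask) and
# image processor outputs (pixel_values).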
|
class AltCLIPProcessor(ProcessorMixin):
'''
Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single
processor.
[`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See
the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information.
Args:
image_processor ([`CLIPImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`XLMRobertaTokenizerFast`], *optional*):
The tokenizer is a required input.
'''
@deprecate_kwarg(old_name='feature_extractor', version='5.0.0', new_name='image_processor')
def __init__(self, image_processor=None, tokenizer=None):
pass
| 3 | 1 | 18 | 2 | 9 | 7 | 3 | 0.96 | 1 | 6 | 2 | 0 | 5 | 0 | 5 | 22 | 115 | 17 | 50 | 24 | 35 | 48 | 35 | 15 | 29 | 8 | 2 | 1 | 14 |
| 560 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/configuration_aria.py | transformers.models.aria.configuration_aria.AriaConfig |
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
from typing import Optional
class AriaConfig(PretrainedConfig):
"""
This class handles the configuration for both vision and text components of the Aria model,
as well as additional parameters for image token handling and projector mapping.
Instantiating a configuration with the defaults will yield a configuration similar to that of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`AriaVisionConfig` or `dict`, *optional*):
Configuration for the vision component.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
text_config (`AriaTextConfig` or `dict`, *optional*):
Configuration for the text component.
projector_patch_to_query_dict (`dict`, *optional*):
Mapping of patch sizes to query dimensions.
image_token_index (`int`, *optional*, defaults to 9):
Index used to represent image tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
Attributes:
model_type (`str`):
Type of the model, set to `"aria"`.
image_token_index (`int`):
Index used to represent image tokens.
projector_patch_to_query_dict (`dict`):
Mapping of patch sizes to query dimensions.
vision_config (`AriaVisionConfig`):
Configuration for the vision component.
text_config (`AriaTextConfig`):
Configuration for the text component.
"""
model_type = 'aria'
attribute_map = {'image_token_id': 'image_token_index'}
sub_configs = {'text_config': AriaTextConfig, 'vision_config': AutoConfig}
def __init__(self, vision_config=None, vision_feature_layer: int=-1, text_config: AriaTextConfig=None, projector_patch_to_query_dict: Optional[dict]=None, image_token_index: int=9, initializer_range: float=0.02, **kwargs):
self.image_token_index = image_token_index
if projector_patch_to_query_dict is None:
projector_patch_to_query_dict = {1225: 128, 4900: 256}
self.projector_patch_to_query_dict = {int(k): int(v) for k, v in projector_patch_to_query_dict.items()}
self.max_value_projector_patch_to_query_dict = max(self.projector_patch_to_query_dict.values())
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config['model_type'] = 'idefics3_vision'
vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING['idefics3_vision']()
self.vision_config = vision_config
self.initializer_range = initializer_range
if isinstance(text_config, dict) and 'model_type' in text_config:
text_config = AriaTextConfig(**text_config)
elif text_config is None:
text_config = AriaTextConfig()
self.text_config = text_config
super().__init__(**kwargs)
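A short sketch of the defaulting and key-coercion behaviour implemented in `__init__` above; the import path mirrors this record's module and is an assumption about the installed package layout:
```python
# Sketch of AriaConfig defaults; assumes the module path shown in this record.
from transformers.models.aria.configuration_aria import AriaConfig

cfg = AriaConfig()
assert cfg.image_token_index == 9                          # default image token index
assert cfg.vision_config.model_type == "idefics3_vision"   # fallback vision config
# String keys in projector_patch_to_query_dict are coerced to int.
cfg2 = AriaConfig(projector_patch_to_query_dict={"1225": 128})
assert cfg2.projector_patch_to_query_dict == {1225: 128}
assert cfg2.max_value_projector_patch_to_query_dict == 128
```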
|
class AriaConfig(PretrainedConfig):
'''
This class handles the configuration for both vision and text components of the Aria model,
as well as additional parameters for image token handling and projector mapping.
Instantiating a configuration with the defaults will yield a configuration similar to that of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`AriaVisionConfig` or `dict`, *optional*):
Configuration for the vision component.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
text_config (`AriaTextConfig` or `dict`, *optional*):
Configuration for the text component.
projector_patch_to_query_dict (`dict`, *optional*):
Mapping of patch sizes to query dimensions.
image_token_index (`int`, *optional*, defaults to 9):
Index used to represent image tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
Attributes:
model_type (`str`):
Type of the model, set to `"aria"`.
image_token_index (`int`):
Index used to represent image tokens.
projector_patch_to_query_dict (`dict`):
Mapping of patch sizes to query dimensions.
vision_config (`AriaVisionConfig`):
Configuration for the vision component.
text_config (`AriaTextConfig`):
Configuration for the text component.
'''
def __init__(self, vision_config=None, vision_feature_layer: int=-1, text_config: AriaTextConfig=None, projector_patch_to_query_dict: Optional[dict]=None, image_token_index: int=9, initializer_range: float=0.02, **kwargs):
pass
| 2 | 1 | 39 | 5 | 32 | 2 | 6 | 0.97 | 1 | 5 | 1 | 0 | 1 | 7 | 1 | 1 | 79 | 10 | 35 | 20 | 24 | 34 | 21 | 11 | 19 | 6 | 1 | 1 | 6 |
561 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/configuration_aria.py | transformers.models.aria.configuration_aria.AriaTextConfig |
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig
class AriaTextConfig(PretrainedConfig):
"""
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a configuration similar to that of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
The size of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 2):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
moe_num_experts (`int`, *optional*, defaults to 8):
The number of experts in the MoE layer.
moe_topk (`int`, *optional*, defaults to 2):
The number of top experts to route to for each token.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of shared experts.
"""
model_type = 'aria_text'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
base_config_key = 'text_config'
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size: int=4096, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=2, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, moe_num_experts: int=8, moe_topk: int=2, moe_num_shared_experts: int=2, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
if self.rope_scaling is not None and 'type' in self.rope_scaling:
self.rope_scaling['rope_type'] = self.rope_scaling['type']
rope_config_validation(self)
self.moe_num_experts = moe_num_experts
self.moe_topk = moe_topk
self.moe_num_shared_experts = moe_num_shared_experts
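The constructor above contains two small pieces of defaulting logic worth seeing in action: the GQA fallback for `num_key_value_heads` / `head_dim`, and the migration of a legacy `rope_scaling["type"]` key. A hedged sketch:
```python
# Sketch of AriaTextConfig defaulting; assumes this record's module path.
from transformers.models.aria.configuration_aria import AriaTextConfig

cfg = AriaTextConfig()
# Unset num_key_value_heads falls back to num_attention_heads (plain MHA).
assert cfg.num_key_value_heads == cfg.num_attention_heads == 32
# Unset head_dim falls back to hidden_size // num_attention_heads.
assert cfg.head_dim == 4096 // 32
# A legacy "type" key is copied to "rope_type" before rope_config_validation.
cfg2 = AriaTextConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert cfg2.rope_scaling["rope_type"] == "linear"
```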
|
class AriaTextConfig(PretrainedConfig):
'''
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a configuration similar to that of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
The size of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 2):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
moe_num_experts (`int`, *optional*, defaults to 8):
The number of experts in the MoE layer.
moe_topk (`int`, *optional*, defaults to 2):
The number of top experts to route to for each token.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of shared experts.
'''
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size: int=4096, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=2, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, moe_num_experts: int=8, moe_topk: int=2, moe_num_shared_experts: int=2, **kwargs):
pass
| 2 | 1 | 67 | 2 | 62 | 3 | 4 | 1.44 | 1 | 2 | 0 | 0 | 1 | 21 | 1 | 1 | 188 | 5 | 75 | 55 | 45 | 108 | 33 | 27 | 31 | 4 | 1 | 1 | 4 |
562 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/image_processing_aria.py | transformers.models.aria.image_processing_aria.AriaImageProcessor |
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, select_best_resolution
from typing import Optional, Union
from ...utils import TensorType, logging
import numpy as np
from ...image_transforms import PaddingMode, convert_to_rgb, pad, resize, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from collections.abc import Iterable
logger = logging.get_logger(__name__)
# NOTE: `divide_to_patches`, used by get_image_patches below, comes from the
# original module's namespace and is not reproduced in this excerpt.
class AriaImageProcessor(BaseImageProcessor):
"""
A vision processor for the Aria model that handles image preprocessing.
Initialize the AriaImageProcessor.
Args:
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to 980):
Maximum image size.
min_image_size (`int`, *optional*, defaults to 336):
Minimum image size.
split_resolutions (`list`, *optional*, defaults to a list of optimal resolutions as tuples):
The optimal resolutions for splitting the image.
split_image (`bool`, *optional*, defaults to `False`):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
The resampling filter to use if resizing the image.
"""
model_input_names = ['pixel_values', 'pixel_mask', 'num_crops']
def __init__(self, image_mean: Optional[list[float]]=None, image_std: Optional[list[float]]=None, max_image_size: int=980, min_image_size: int=336, split_resolutions: Optional[list[tuple[int, int]]]=None, split_image: Optional[bool]=False, do_convert_rgb: Optional[bool]=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: Optional[bool]=True, resample: PILImageResampling=PILImageResampling.BICUBIC, **kwargs):
super().__init__(**kwargs)
if image_mean is None:
image_mean = [0.5, 0.5, 0.5]
if image_std is None:
image_std = [0.5, 0.5, 0.5]
self.max_image_size = max_image_size
self.min_image_size = min_image_size
self.image_mean = image_mean
self.image_std = image_std
self.split_image = split_image
if split_resolutions is None:
split_resolutions = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (4, 1), (4, 2), (5, 1), (6, 1), (7, 1), (8, 1)]
split_resolutions = [(el[0] * 490, el[1] * 490) for el in split_resolutions]
self.split_resolutions = split_resolutions
self.do_convert_rgb = do_convert_rgb
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.resample = resample
def preprocess(self, images: Union[ImageInput, list[ImageInput]], image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, max_image_size: Optional[int]=None, min_image_size: Optional[int]=None, split_image: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, resample: Optional[PILImageResampling]=None, return_tensors: Optional[Union[str, TensorType]]='pt', data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Process a list of images.
Args:
images (ImageInput or list of ImageInput):
The input image or a list of images.
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
Maximum image size.
min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
Minimum image size.
split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
The resampling filter to use if resizing the image.
return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
The type of tensor to return.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
BatchFeature:
A BatchFeature object containing:
- 'pixel_values':
Tensor of processed image pixel values.
- 'pixel_mask':
Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
- True (1) values indicate pixels that belong to the original resized image.
- False (0) values indicate pixels that are part of the padding.
The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
- 'num_crops':
The maximum number of crops across all images.
"""
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
max_image_size = max_image_size if max_image_size is not None else self.max_image_size
min_image_size = min_image_size if min_image_size is not None else self.min_image_size
split_image = split_image if split_image is not None else self.split_image
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
resample = resample if resample is not None else self.resample
if max_image_size not in [490, 980]:
raise ValueError('max_image_size must be either 490 or 980')
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
pixel_values = []
pixel_masks = []
num_crops = None
for image in images:
if split_image:
crop_images = self.get_image_patches(image, self.split_resolutions, max_image_size, resample, data_format=input_data_format, input_data_format=input_data_format)
else:
crop_images = [image]
if num_crops is None or len(crop_images) > num_crops:
num_crops = len(crop_images)
for crop_image in crop_images:
h, w = get_image_size(crop_image)
scale = max_image_size / max(h, w)
if w >= h:
new_size = (max(int(h * scale), min_image_size), max_image_size)
else:
new_size = (max_image_size, max(int(w * scale), min_image_size))
crop_image_resized = resize(crop_image, new_size, resample=resample, data_format=input_data_format, input_data_format=input_data_format)
padding_bottom, padding_right = (max_image_size - new_size[0], max_image_size - new_size[1])
crop_image_padded = pad(crop_image_resized, ((0, padding_bottom), (0, padding_right)), data_format=input_data_format, input_data_format=input_data_format)
pixel_mask = np.zeros((max_image_size, max_image_size), dtype=bool)
pixel_mask[:new_size[0], :new_size[1]] = 1
pixel_masks.append(pixel_mask)
if do_rescale:
crop_image_padded = self.rescale(image=crop_image_padded, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
crop_image_padded = self.normalize(crop_image_padded, self.image_mean, self.image_std, data_format=input_data_format, input_data_format=input_data_format)
crop_image_padded = to_channel_dimension_format(crop_image_padded, data_format, input_data_format) if data_format is not None else crop_image_padded
pixel_values.append(crop_image_padded)
return BatchFeature(data={'pixel_values': np.stack(pixel_values, axis=0), 'pixel_mask': np.stack(pixel_masks, axis=0), 'num_crops': num_crops}, tensor_type=return_tensors)
def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:
"""
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image (np.array):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
np.array: The resized and padded image.
"""
new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
return resized_image
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
original_height, original_width = original_resolution
target_height, target_width = target_resolution
paste_x, r_x = divmod(target_width - original_width, 2)
paste_y, r_y = divmod(target_height - original_height, 2)
return ((paste_y, paste_y + r_y), (paste_x, paste_x + r_x))
def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array:
"""
Pad an image to a target resolution while maintaining aspect ratio.
"""
new_resolution = get_patch_output_size(image, target_resolution, input_data_format)
padding = self._get_padding_size(new_resolution, target_resolution)
padded_image = self.pad(image, padding=padding)
return padded_image
def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
dimensions or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
as input.
Args:
image (`np.ndarray`):
The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
Padding to apply to the edges of the height, width axes. Can be one of three formats:
- `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
- `((before, after),)` yields same before and after pad for height and width.
- `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image.
"""
if isinstance(padding, int) or len(padding) != 4:
return pad(image, padding, mode, constant_values, data_format, input_data_format)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
padding_mode_mapping = {PaddingMode.CONSTANT: 'constant', PaddingMode.REFLECT: 'reflect', PaddingMode.REPLICATE: 'edge', PaddingMode.SYMMETRIC: 'symmetric'}
image = np.pad(image, padding, mode=padding_mode_mapping[mode], constant_values=constant_values)
image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
return image
def get_image_patches(self, image: np.ndarray, grid_pinpoints: list[tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]:
"""
Process an image with variable resolutions by dividing it into patches.
Args:
image (`np.array`):
The input image to be processed.
grid_pinpoints (list[tuple[int, int]]):
A list of possible resolutions as tuples.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
`list[np.array]`: A list of NumPy arrays containing the processed image patches.
"""
if not isinstance(grid_pinpoints, list):
raise TypeError('grid_pinpoints must be a list of possible resolutions.')
possible_resolutions = grid_pinpoints
image_size = get_image_size(image, channel_dim=input_data_format)
best_resolution = select_best_resolution(image_size, possible_resolutions)
resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format)
padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches]
return patches
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
images_kwargs = images_kwargs if images_kwargs is not None else {}
split_image = images_kwargs.get('split_image', self.split_image)
max_image_size = images_kwargs.get('max_image_size', self.max_image_size)
resized_height, resized_width = select_best_resolution((height, width), self.split_resolutions)
num_patches = 1 if not split_image else resized_height // max_image_size * resized_width // max_image_size
return num_patches
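To make the resize-then-pad pipeline in `preprocess` concrete, here is a hedged end-to-end sketch on a random image; it assumes `fetch_images` passes already-loaded arrays through unchanged and that the module path matches this record:
```python
# Hedged preprocess sketch; assumes non-URL inputs pass through fetch_images.
import numpy as np
from transformers.models.aria.image_processing_aria import AriaImageProcessor

processor = AriaImageProcessor()
image = (np.random.rand(600, 400, 3) * 255).astype(np.uint8)   # HWC uint8
out = processor.preprocess(image, max_image_size=490, return_tensors="np")
# Each crop is resized so its long side equals max_image_size, then padded square.
print(out["pixel_values"].shape)   # (1, 3, 490, 490): one crop, split_image=False
print(out["pixel_mask"].shape)     # (1, 490, 490): True marks real image content
print(out["num_crops"])            # 1
```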
|
class AriaImageProcessor(BaseImageProcessor):
'''
A vision processor for the Aria model that handles image preprocessing.
Initialize the AriaImageProcessor.
Args:
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to 980):
Maximum image size.
min_image_size (`int`, *optional*, defaults to 336):
Minimum image size.
split_resolutions (`list`, *optional*, defaults to a list of optimal resolutions as tuples):
The optimal resolutions for splitting the image.
split_image (`bool`, *optional*, defaults to `False`):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
The resampling filter to use if resizing the image.
'''
def __init__(self, image_mean: Optional[list[float]]=None, image_std: Optional[list[float]]=None, max_image_size: int=980, min_image_size: int=336, split_resolutions: Optional[list[tuple[int, int]]]=None, split_image: Optional[bool]=False, do_convert_rgb: Optional[bool]=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: Optional[bool]=True, resample: PILImageResampling=PILImageResampling.BICUBIC, **kwargs):
pass
def preprocess(self, images: Union[ImageInput, list[ImageInput]], image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, max_image_size: Optional[int]=None, min_image_size: Optional[int]=None, split_image: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, resample: Optional[PILImageResampling]=None, return_tensors: Optional[Union[str, TensorType]]='pt', data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Process a list of images.
Args:
images (ImageInput or list of ImageInput):
The input image or a list of images.
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
Maximum image size.
min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
Minimum image size.
split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
The resampling filter to use if resizing the image.
return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
The type of tensor to return.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
BatchFeature:
A BatchFeature object containing:
- 'pixel_values':
Tensor of processed image pixel values.
- 'pixel_mask':
Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
- True (1) values indicate pixels that belong to the original resized image.
- False (0) values indicate pixels that are part of the padding.
The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
- 'num_crops':
The maximum number of crops across all images.
'''
pass
def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:
'''
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image (np.array):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
np.array: The resized and padded image.
'''
pass
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
pass
def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array:
'''
Pad an image to a target resolution while maintaining aspect ratio.
'''
pass
def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
dimensions or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
as input.
Args:
image (`np.ndarray`):
The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
Padding to apply to the edges of the height, width axes. Can be one of three formats:
- `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
- `((before, after),)` yields same before and after pad for height and width.
- `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image.
'''
pass
def get_image_patches(self, image: np.ndarray, grid_pinpoints: list[tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]:
'''
Process an image with variable resolutions by dividing it into patches.
Args:
image (`np.array`):
The input image to be processed.
grid_pinpoints (list[tuple[int, int]]):
A list of possible resolutions as tuples.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
`list[np.array]`: A list of NumPy arrays containing the processed image patches.
'''
pass
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
'''
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
'''
pass
| 9 | 7 | 60 | 6 | 33 | 22 | 5 | 0.76 | 1 | 12 | 3 | 0 | 6 | 9 | 6 | 26 | 392 | 44 | 199 | 88 | 146 | 152 | 96 | 42 | 89 | 20 | 3 | 3 | 32 |
563 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py | transformers.models.aria.modeling_aria.AriaCausalLMOutputWithPast |
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from dataclasses import dataclass
from typing import Callable, Optional, Union
import torch
from ...cache_utils import Cache, DynamicCache
@dataclass
@auto_docstring(custom_intro='\n Base class for Aria causal language model (or autoregressive) outputs.\n ')
class AriaCausalLMOutputWithPast(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
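A small sketch of how this `ModelOutput` dataclass behaves at runtime; only the dict/tuple duality is shown, and the import path is assumed from this record:
```python
# ModelOutput subclasses act as dataclasses, dicts, and (filtered) tuples.
import torch
from transformers.models.aria.modeling_aria import AriaCausalLMOutputWithPast

out = AriaCausalLMOutputWithPast(logits=torch.zeros(1, 4, 32000))
assert out.loss is None                 # unset fields stay None
assert out["logits"] is out.logits      # dict-style access hits the same tensor
assert len(out.to_tuple()) == 1         # None fields are dropped from the tuple
```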
|
@dataclass
@auto_docstring(custom_intro='\n Base class for Aria causal language model (or autoregressive) outputs.\n ')
class AriaCausalLMOutputWithPast(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.57 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 5 | 7 | 7 | 6 | 25 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
564 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py | transformers.models.aria.modeling_aria.AriaCrossAttention |
from .configuration_aria import AriaConfig, AriaTextConfig
from torch import nn
class AriaCrossAttention(nn.Module):
"""
Aria Cross-Attention module.
Args:
config (`AriaConfig`):
The configuration to use.
"""
def __init__(self, config: AriaConfig, dropout_rate: float=0):
super().__init__()
hidden_size = config.vision_config.hidden_size
num_heads = config.vision_config.num_attention_heads
self.num_heads = num_heads
self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
self.linear = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout_rate)
self.layer_norm = nn.LayerNorm(hidden_size)
self.layer_norm_kv = nn.LayerNorm(hidden_size)
def forward(self, key_value_states, hidden_states, attn_mask=None):
"""
Forward pass of the AriaCrossAttention module.
Args:
key_value_states (`torch.Tensor`):
Input tensor for key and value.
hidden_states (`torch.Tensor`):
Input tensor for query.
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
torch.Tensor:
Output tensor after cross-attention.
"""
query = self.q_proj(self.layer_norm(hidden_states))
key_value_states = self.layer_norm_kv(key_value_states)
key = self.k_proj(key_value_states)
value = self.v_proj(key_value_states)
attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)
attn_output = self.dropout(self.linear(attn_output))
return attn_output
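A shape-level sketch of the module above with random weights; hidden sizes come from the default config, so no checkpoint is needed:
```python
# Shape check for AriaCrossAttention; random weights, no pretrained checkpoint.
import torch
from transformers.models.aria.configuration_aria import AriaConfig
from transformers.models.aria.modeling_aria import AriaCrossAttention

config = AriaConfig()
layer = AriaCrossAttention(config)
hidden = config.vision_config.hidden_size
queries = torch.randn(2, 16, hidden)    # hidden_states: becomes the query
patches = torch.randn(2, 256, hidden)   # key_value_states: becomes key/value
out = layer(key_value_states=patches, hidden_states=queries)
assert out.shape == (2, 16, hidden)     # output length follows the query side
```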
|
class AriaCrossAttention(nn.Module):
'''
Aria Cross-Attention module.
Args:
config (`AriaConfig`):
The configuration to use.
'''
def __init__(self, config: AriaConfig, dropout_rate: float=0):
pass
def forward(self, key_value_states, hidden_states, attn_mask=None):
'''
Forward pass of the AriaCrossAttention module.
Args:
key_value_states (`torch.Tensor`):
Input tensor for key and value.
hidden_states (`torch.Tensor`):
Input tensor for query.
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
torch.Tensor:
Output tensor after cross-attention.
'''
pass
| 3 | 2 | 22 | 4 | 11 | 7 | 1 | 0.91 | 1 | 3 | 1 | 0 | 2 | 9 | 2 | 12 | 53 | 11 | 22 | 18 | 19 | 20 | 22 | 18 | 19 | 1 | 1 | 0 | 2 |
565 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py | transformers.models.aria.modeling_aria.AriaForConditionalGeneration |
from ...cache_utils import Cache, DynamicCache
import torch
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from .configuration_aria import AriaConfig, AriaTextConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...generation import GenerationMixin
from torch import nn
@auto_docstring(custom_intro='\n Aria model for conditional generation tasks.\n\n This model combines a vision tower, a multi-modal projector, and a language model\n to perform tasks that involve both image and text inputs.\n ')
class AriaForConditionalGeneration(AriaPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_tower': 'model.vision_tower', '^multi_modal_projector': 'model.multi_modal_projector', '^language_model.lm_head': 'lm_head'}
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: AriaConfig):
super().__init__(config)
self.model = AriaModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):
return self.model.get_image_features(pixel_values=pixel_values, pixel_mask=pixel_mask, vision_feature_layer=vision_feature_layer)
@property
def language_model(self):
return self.model.language_model
@property
def vision_tower(self):
return self.model.vision_tower
@property
def multi_modal_projector(self):
return self.model.multi_modal_projector
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, AriaCausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```"""
outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return AriaCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_mask=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)
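# Vision inputs only matter on the prefill step (cache_position[0] == 0); once the image
# embeddings have been merged and cached, later decode steps do not need pixel values.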
if cache_position[0] == 0:
model_inputs['pixel_values'] = pixel_values
model_inputs['pixel_mask'] = pixel_mask
return model_inputs
|
@auto_docstring(custom_intro='\n Aria model for conditional generation tasks.\n\n This model combines a vision tower, a multi-modal projector, and a language model\n to perform tasks that involve both image and text inputs.\n ')
class AriaForConditionalGeneration(AriaPreTrainedModel, GenerationMixin):
def __init__(self, config: AriaConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_output_embeddings(self) -> nn.Module:
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):
pass
@property
def language_model(self):
pass
@property
def vision_tower(self):
pass
@property
def multi_modal_projector(self):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, AriaCausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image
>>> # Note that passing the image URLs (instead of the actual PIL images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_mask=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
pass
| 19
| 1
| 21
| 2
| 14
| 5
| 2
| 0.34
| 2
| 10
| 5
| 0
| 11
| 6
| 11
| 12
| 250
| 34
| 161
| 74
| 113
| 55
| 77
| 40
| 65
| 11
| 2
| 2
| 25
|
566
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaGroupedExpertsGemm
|
from torch import nn
import torch
class AriaGroupedExpertsGemm(nn.Module):
"""
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
groups (`int`):
Number of expert groups.
"""
def __init__(self, in_features, out_features, groups):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.groups = groups
self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))
def forward(self, input, tokens_per_expert):
"""
Perform grouped matrix multiplication.
Args:
input (`torch.Tensor`):
Input tensor of shape (num_tokens, in_features).
tokens_per_expert (`torch.Tensor`):
Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
return sequential_experts_gemm(input, self.weight, tokens_per_expert.cpu())
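# `sequential_experts_gemm` is a module-level helper not captured in this snippet. A minimal
# sketch of such a sequential fallback, assuming tokens arrive already sorted by expert so
# that each expert owns one contiguous slice (shapes follow the docstring above):
def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
    output = torch.zeros(
        token_states.shape[0], expert_weights.shape[-1],
        dtype=token_states.dtype, device=token_states.device,
    )
    # Prefix sums of the per-expert counts give each expert's [start, end) token slice.
    boundaries = torch.cat(
        (torch.zeros(1, dtype=torch.long), torch.cumsum(tokens_per_expert, dim=0))
    )
    for expert in range(expert_weights.shape[0]):
        start, end = boundaries[expert], boundaries[expert + 1]
        output[start:end] = token_states[start:end] @ expert_weights[expert]
    return output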
|
class AriaGroupedExpertsGemm(nn.Module):
'''
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
groups (`int`):
Number of expert groups.
'''
def __init__(self, in_features, out_features, groups):
pass
def forward(self, input, tokens_per_expert):
'''
Perform grouped matrix multiplication.
Args:
input (`torch.Tensor`):
Input tensor of shape (num_tokens, in_features).
tokens_per_expert (`torch.Tensor`):
Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
'''
pass
| 3
| 2
| 12
| 1
| 6
| 5
| 1
| 1.85
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 42
| 5
| 13
| 7
| 10
| 24
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
567
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaGroupedExpertsMLP
|
from .configuration_aria import AriaConfig, AriaTextConfig
from torch import nn
import torch
class AriaGroupedExpertsMLP(nn.Module):
"""
Grouped MLP module for Mixture of Experts.
Args:
config (`AriaTextConfig`):
Configuration object for the model.
"""
def __init__(self, config: AriaTextConfig) -> None:
super().__init__()
self.config = config
self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)
self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)
def forward(self, permuted_tokens, tokens_per_expert):
"""
Forward pass of the Grouped MLP.
Args:
permuted_tokens (torch.Tensor): Permuted input tokens.
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor after passing through the MLP.
"""
fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
projection, gate = torch.chunk(fc1_output, 2, dim=-1)
fc1_output = nn.functional.silu(projection) * gate
fc2_output = self.fc2(fc1_output, tokens_per_expert)
return fc2_output
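# The gating above is SwiGLU-style: fc1 produces 2 * intermediate_size features, split into a
# projection and a gate. A tiny self-contained demonstration with illustrative sizes,
# mapping (num_tokens, 2 * intermediate_size) to (num_tokens, intermediate_size):
fc1_output = torch.randn(4, 8)
projection, gate = torch.chunk(fc1_output, 2, dim=-1)
gated = nn.functional.silu(projection) * gate
print(gated.shape)  # torch.Size([4, 4])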
|
class AriaGroupedExpertsMLP(nn.Module):
'''
Grouped MLP module for Mixture of Experts.
Args:
config (`AriaTextConfig`):
Configuration object for the model.
'''
def __init__(self, config: AriaTextConfig) -> None:
pass
def forward(self, permuted_tokens, tokens_per_expert):
'''
Forward pass of the Grouped MLP.
Args:
permuted_tokens (torch.Tensor): Permuted input tokens.
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor after passing through the MLP.
'''
pass
| 3
| 2
| 11
| 1
| 6
| 4
| 1
| 1.17
| 1
| 3
| 2
| 0
| 2
| 3
| 2
| 12
| 31
| 5
| 12
| 9
| 9
| 14
| 12
| 9
| 9
| 1
| 1
| 0
| 2
|
568
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaPreTrainedModel
|
from torch import nn
from .configuration_aria import AriaConfig, AriaTextConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
@auto_docstring
class AriaPreTrainedModel(PreTrainedModel):
config: AriaConfig
base_model_prefix = ''
supports_gradient_checkpointing = True
_no_split_modules = ['AriaDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': AriaTextDecoderLayer, 'attentions': AriaTextAttention}
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaProjector):
nn.init.trunc_normal_(module.query, std=self.config.initializer_range)
|
@auto_docstring
class AriaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 12
| 0
| 12
| 0
| 6
| 0.04
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 26
| 1
| 25
| 15
| 23
| 1
| 23
| 15
| 21
| 6
| 1
| 2
| 6
|
569
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaProjector
|
from torch import nn
from typing import Callable, Optional, Union
import torch
from .configuration_aria import AriaConfig, AriaTextConfig
class AriaProjector(nn.Module):
"""
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
"""
def __init__(self, config: AriaConfig):
super().__init__()
self.patch_to_query_dict = config.projector_patch_to_query_dict
self.in_features = config.vision_config.hidden_size
self.num_heads = config.vision_config.num_attention_heads
self.kv_dim = config.vision_config.hidden_size
self.hidden_features = config.text_config.hidden_size
self.output_dim = config.text_config.hidden_size
self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))
self.cross_attn = AriaCrossAttention(config)
self.layer_norm = nn.LayerNorm(self.in_features)
self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)
def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):
"""
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
attn_mask (`torch.Tensor`, *optional*, defaults to `None`):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
"""
batch_size, num_patches = (key_value_states.shape[0], key_value_states.shape[1])
if num_patches not in self.patch_to_query_dict:
raise KeyError(f'Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}.')
query_num = self.patch_to_query_dict[num_patches]
queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)
if attn_mask is not None:
attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)
attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)
attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)
out = self.feed_forward(self.layer_norm(attention_out))
return out
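# The query-selection step in isolation, with hypothetical sizes (the dict values match the
# config defaults): the patch count picks how many learned queries are used, which are then
# broadcast across the batch.
patch_to_query_dict = {1225: 128, 4900: 256}
query = torch.zeros(256, 64)  # (max_queries, in_features)
batch_size, num_patches = 2, 1225
queries = query[: patch_to_query_dict[num_patches]].unsqueeze(0).repeat(batch_size, 1, 1)
print(queries.shape)  # torch.Size([2, 128, 64])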
|
class AriaProjector(nn.Module):
'''
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
'''
def __init__(self, config: AriaConfig):
pass
def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):
'''
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
attn_mask (`torch.Tensor`, *optional*, defaults to `None`):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
'''
pass
| 3
| 2
| 26
| 6
| 15
| 5
| 2
| 0.57
| 1
| 6
| 3
| 0
| 2
| 10
| 2
| 12
| 63
| 16
| 30
| 21
| 24
| 17
| 25
| 18
| 22
| 3
| 1
| 1
| 4
|
570
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaProjectorMLP
|
from torch import nn
from ...activations import ACT2FN
class AriaProjectorMLP(nn.Module):
"""
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
"""
def __init__(self, in_features, hidden_features, output_dim):
super().__init__()
self.linear_in = nn.Linear(in_features, hidden_features, bias=False)
self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)
self.act = ACT2FN['gelu_new']
def forward(self, hidden_states):
hidden_states = self.act(self.linear_in(hidden_states))
hidden_states = self.linear_out(hidden_states)
return hidden_states
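# ACT2FN['gelu_new'] resolves to the tanh-approximated GELU. A quick shape check with
# hypothetical dimensions:
import torch
mlp = AriaProjectorMLP(in_features=32, hidden_features=64, output_dim=16)
print(mlp(torch.randn(2, 10, 32)).shape)  # torch.Size([2, 10, 16])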
|
class AriaProjectorMLP(nn.Module):
'''
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
'''
def __init__(self, in_features, hidden_features, output_dim):
pass
def forward(self, hidden_states):
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 23
| 3
| 10
| 6
| 7
| 10
| 10
| 6
| 7
| 1
| 1
| 0
| 2
|
571
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaSharedExpertsMLP
|
from torch import nn
from .configuration_aria import AriaConfig, AriaTextConfig
from ...activations import ACT2FN
class AriaSharedExpertsMLP(nn.Module):
"""
MLP applied by the shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
class AriaSharedExpertsMLP(nn.Module):
'''
MLP applied by the shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
'''
def __init__(self, config: AriaTextConfig):
pass
def forward(self, x):
pass
| 3
| 1
| 6
| 0
| 6
| 0
| 1
| 0.54
| 1
| 2
| 1
| 0
| 2
| 7
| 2
| 12
| 24
| 4
| 13
| 11
| 10
| 7
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
572
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextAttention
|
from ...processing_utils import Unpack
from torch import nn
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_aria import AriaConfig, AriaTextConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from ...cache_utils import Cache, DynamicCache
class AriaTextAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
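# `eager_attention_forward` and `apply_rotary_pos_emb` are module-level helpers omitted from
# this snippet. A sketch of the eager path following the standard transformers pattern: GQA
# key/value heads are repeated to match the query heads, then scaled dot-product attention
# with an additive mask and a float32 softmax.
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)
    batch, num_kv_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, slen, head_dim)

def eager_attention_forward(module, query, key, value, attention_mask, scaling, dropout=0.0, **kwargs):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)
    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # The mask is additive: large negative values at disallowed positions.
        attn_weights = attn_weights + attention_mask[:, :, :, : key_states.shape[-2]]
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states).transpose(1, 2).contiguous()
    return attn_output, attn_weights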
|
class AriaTextAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: AriaTextConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 1
| 35
| 4
| 31
| 1
| 3
| 0.03
| 1
| 6
| 3
| 0
| 2
| 11
| 2
| 12
| 74
| 9
| 63
| 31
| 52
| 2
| 34
| 23
| 31
| 5
| 1
| 2
| 6
|
573
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextDecoderLayer
|
from typing import Callable, Optional, Union
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_aria import AriaConfig, AriaTextConfig
from ...processing_utils import Unpack
from ...modeling_layers import GradientCheckpointingLayer
class AriaTextDecoderLayer(GradientCheckpointingLayer):
"""
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and a Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = AriaTextAttention(config=config, layer_idx=layer_idx)
self.mlp = AriaTextMoELayer(config)
self.input_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class AriaTextDecoderLayer(GradientCheckpointingLayer):
'''
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and a Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
'''
def __init__(self, config: AriaTextConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4
| 1
| 25
| 3
| 21
| 2
| 2
| 0.29
| 1
| 10
| 6
| 0
| 2
| 5
| 2
| 12
| 63
| 10
| 42
| 22
| 28
| 12
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
574
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextForCausalLM
|
from torch import nn
from .configuration_aria import AriaConfig, AriaTextConfig
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...generation import GenerationMixin
import torch
@auto_docstring
class AriaTextForCausalLM(AriaTextPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.model = AriaTextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, AriaTextForCausalLM
>>> model = AriaTextForCausalLM.from_pretrained("meta-aria_text/AriaText-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-aria_text/AriaText-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class AriaTextForCausalLM(AriaTextPreTrainedModel, GenerationMixin):
def __init__(self, config: AriaTextConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, AriaTextForCausalLM
>>> model = AriaTextForCausalLM.from_pretrained("meta-aria_text/AriaText-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-aria_text/AriaText-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
| 5
| 1
| 14
| 2
| 9
| 4
| 2
| 0.48
| 2
| 10
| 5
| 0
| 8
| 3
| 8
| 9
| 135
| 24
| 75
| 37
| 48
| 36
| 37
| 21
| 28
| 8
| 2
| 1
| 15
|
575
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextMoELayer
|
from .configuration_aria import AriaConfig, AriaTextConfig
from torch import nn
import torch
class AriaTextMoELayer(nn.Module):
"""
Aria Text Mixture of Experts (MoE) Layer.
This layer applies a gating mechanism to route input tokens to different experts.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__()
self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False)
self.experts = AriaGroupedExpertsMLP(config)
self.shared_experts = AriaSharedExpertsMLP(config)
self.config = config
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the MoE Layer.
Args:
hidden_states (`torch.Tensor`):
Input tensor of shape (batch_size, sequence_length, hidden_size).
Returns:
torch.Tensor: Output tensor after passing through the MoE layer.
Process:
1. Route tokens to experts using the router.
2. Permute tokens based on routing decisions.
3. Process tokens through experts.
4. Unpermute and combine expert outputs.
5. Add shared expert output to the final result.
"""
original_shape = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
logits = self.router(hidden_states)
top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
original_dtype = top_indices.dtype
tokens_per_expert = torch.histc(top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1).to(original_dtype)
indices = top_indices
flatten_indices = indices.view(-1)
sorted_indices = torch.argsort(flatten_indices)
permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk)
expert_output = self.experts(permuted_tokens, tokens_per_expert)
unpermuted_tokens = torch.zeros((scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device)
unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape)
shared_expert_output = self.shared_experts(hidden_states.view(original_shape))
return output + shared_expert_output
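# The routing arithmetic in isolation, on toy values (hypothetical sizes): top-k expert
# selection, softmax over only the selected logits, and histc to count tokens per expert.
num_experts, moe_topk = 4, 2
logits = torch.randn(6, num_experts)  # (num_tokens, num_experts)
top_logits, top_indices = torch.topk(logits, k=moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)  # each token's weights over its top-k experts
tokens_per_expert = torch.histc(
    top_indices.flatten().to(torch.float32), bins=num_experts, min=0, max=num_experts - 1
).to(torch.long)
print(tokens_per_expert.sum().item())  # 12 == num_tokens * moe_topk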
|
class AriaTextMoELayer(nn.Module):
'''
Aria Text Mixture of Experts (MoE) Layer.
This layer applies a gating mechanism to route input tokens to different experts.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
'''
def __init__(self, config: AriaTextConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
'''
Forward pass of the MoE Layer.
Args:
hidden_states (`torch.Tensor`):
Input tensor of shape (batch_size, sequence_length, hidden_size).
Returns:
torch.Tensor: Output tensor after passing through the MoE layer.
Process:
1. Route tokens to experts using the router.
2. Permute tokens based on routing decisions.
3. Process tokens through experts.
4. Unpermute and combine expert outputs.
5. Add shared expert output to the final result.
'''
pass
| 3
| 2
| 33
| 6
| 17
| 10
| 1
| 0.74
| 1
| 5
| 3
| 0
| 2
| 4
| 2
| 12
| 77
| 16
| 35
| 21
| 32
| 26
| 26
| 21
| 23
| 1
| 1
| 0
| 2
|
576
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextModel
|
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput
from ...cache_utils import Cache, DynamicCache
from .configuration_aria import AriaConfig, AriaTextConfig
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import check_model_inputs
from ...masking_utils import create_causal_mask
from ...processing_utils import Unpack
import torch
from typing import Callable, Optional, Union
@auto_docstring
class AriaTextModel(AriaTextPreTrainedModel):
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = AriaTextRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class AriaTextModel(AriaTextPreTrainedModel):
def __init__(self, config: AriaTextConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6
| 0
| 40
| 5
| 30
| 6
| 6
| 0.22
| 1
| 16
| 10
| 0
| 5
| 7
| 6
| 7
| 257
| 34
| 184
| 65
| 146
| 40
| 89
| 34
| 82
| 21
| 2
| 2
| 37
|
577
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_aria import AriaConfig, AriaTextConfig
@auto_docstring
class AriaTextPreTrainedModel(PreTrainedModel):
config: AriaTextConfig
base_model_prefix = 'model'
_no_split_modules = ['AriaTextDecoderLayer', 'AriaGroupedExpertsGemm']
supports_gradient_checkpointing = True
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': AriaTextDecoderLayer, 'attentions': AriaTextAttention}
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaGroupedExpertsGemm):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
@auto_docstring
class AriaTextPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 16
| 0
| 16
| 0
| 8
| 0.12
| 1
| 1
| 1
| 2
| 1
| 0
| 1
| 1
| 30
| 2
| 25
| 11
| 23
| 3
| 22
| 11
| 20
| 8
| 1
| 2
| 8
|
578
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextRMSNorm
|
import torch
from ...integrations import use_kernel_forward_from_hub
from torch import nn
@use_kernel_forward_from_hub('RMSNorm')
class AriaTextRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
AriaTextRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
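# Sanity check: with the weight at its initial value of ones, the output is just the input
# divided by its root-mean-square (plus eps).
norm = AriaTextRMSNorm(hidden_size=4)
x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-06)
print(torch.allclose(norm(x), manual))  # True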
|
@use_kernel_forward_from_hub('RMSNorm')
class AriaTextRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
AriaTextRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 5
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
579
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modeling_aria.py
|
transformers.models.aria.modeling_aria.AriaTextRotaryEmbedding
|
from .configuration_aria import AriaConfig, AriaTextConfig
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from torch import nn
class AriaTextRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: AriaTextConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
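# `apply_rotary_pos_emb`, used by AriaTextAttention on the cos/sin returned here, is another
# module-level helper not shown in these snippets. A sketch following the standard rotary
# implementation (unsqueeze_dim=1 broadcasts cos/sin across the head dimension):
def rotate_half(x):
    # Rotate the last dimension by swapping and negating its two halves.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    cos = cos.unsqueeze(unsqueeze_dim)  # (batch, 1, seq, head_dim), broadcast over heads
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed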
|
class AriaTextRotaryEmbedding(nn.Module):
def __init__(self, config: AriaTextConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
580
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaCausalLMOutputWithPast
|
from ..llava.modeling_llava import LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast
class AriaCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
|
class AriaCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
581
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaConfig
|
from ...configuration_utils import PretrainedConfig
from typing import Optional, Union
from ..auto import CONFIG_MAPPING, AutoConfig, AutoTokenizer
class AriaConfig(PretrainedConfig):
"""
This class handles the configuration for both vision and text components of the Aria model,
as well as additional parameters for image token handling and projector mapping.
Instantiating a configuration with the defaults will yield a configuration similar to that of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`AriaVisionConfig` or `dict`, *optional*):
Configuration for the vision component.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
text_config (`AriaTextConfig` or `dict`, *optional*):
Configuration for the text component.
projector_patch_to_query_dict (`dict`, *optional*):
Mapping of patch sizes to query dimensions.
image_token_index (`int`, *optional*, defaults to 9):
Index used to represent image tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
Attributes:
model_type (`str`):
Type of the model, set to `"aria"`.
image_token_index (`int`):
Index used to represent image tokens.
projector_patch_to_query_dict (`dict`):
Mapping of patch sizes to query dimensions.
vision_config (`AriaVisionConfig`):
Configuration for the vision component.
text_config (`AriaTextConfig`):
Configuration for the text component.
"""
model_type = 'aria'
attribute_map = {'image_token_id': 'image_token_index'}
sub_configs = {'text_config': AriaTextConfig, 'vision_config': AutoConfig}
def __init__(self, vision_config=None, vision_feature_layer: int=-1, text_config: AriaTextConfig=None, projector_patch_to_query_dict: Optional[dict]=None, image_token_index: int=9, initializer_range: float=0.02, **kwargs):
self.image_token_index = image_token_index
if projector_patch_to_query_dict is None:
projector_patch_to_query_dict = {1225: 128, 4900: 256}
self.projector_patch_to_query_dict = {int(k): int(v) for k, v in projector_patch_to_query_dict.items()}
self.max_value_projector_patch_to_query_dict = max(self.projector_patch_to_query_dict.values())
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config['model_type'] = 'idefics3_vision'
vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING['idefics3_vision']()
self.vision_config = vision_config
self.initializer_range = initializer_range
if isinstance(text_config, dict) and 'model_type' in text_config:
text_config = AriaTextConfig(**text_config)
elif text_config is None:
text_config = AriaTextConfig()
self.text_config = text_config
super().__init__(**kwargs)
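# Construction with and without explicit sub-configs. Note the int() coercion above: it
# normalizes string keys, which matters when the dict round-trips through JSON.
config = AriaConfig()  # idefics3 vision defaults + AriaTextConfig defaults
print(config.projector_patch_to_query_dict[1225])      # 128
print(config.max_value_projector_patch_to_query_dict)  # 256
config = AriaConfig(projector_patch_to_query_dict={"1225": 64})
print(config.projector_patch_to_query_dict[1225])      # 64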
|
class AriaConfig(PretrainedConfig):
'''
This class handles the configuration for both vision and text components of the Aria model,
as well as additional parameters for image token handling and projector mapping.
Instantiating a configuration with the defaults will yield a configuration similar to that of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`AriaVisionConfig` or `dict`, *optional*):
Configuration for the vision component.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
text_config (`AriaTextConfig` or `dict`, *optional*):
Configuration for the text component.
projector_patch_to_query_dict (`dict`, *optional*):
Mapping of patch sizes to query dimensions.
image_token_index (`int`, *optional*, defaults to 9):
Index used to represent image tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
Attributes:
model_type (`str`):
Type of the model, set to `"aria"`.
image_token_index (`int`):
Index used to represent image tokens.
projector_patch_to_query_dict (`dict`):
Mapping of patch sizes to query dimensions.
vision_config (`AriaVisionConfig`):
Configuration for the vision component.
text_config (`AriaTextConfig`):
Configuration for the text component.
'''
def __init__(self, vision_config=None, vision_feature_layer: int=-1, text_config: AriaTextConfig=None, projector_patch_to_query_dict: Optional[dict]=None, image_token_index: int=9, initializer_range: float=0.02, **kwargs):
pass
| 2
| 1
| 39
| 5
| 32
| 2
| 6
| 0.97
| 1
| 5
| 1
| 0
| 1
| 7
| 1
| 33
| 79
| 10
| 35
| 20
| 24
| 34
| 21
| 11
| 19
| 6
| 2
| 1
| 6
|
582
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaCrossAttention
|
from torch import nn
class AriaCrossAttention(nn.Module):
"""
Aria Cross-Attention module.
Args:
config (`AriaConfig`):
The configuration to use.
"""
def __init__(self, config: AriaConfig, dropout_rate: float=0):
super().__init__()
hidden_size = config.vision_config.hidden_size
num_heads = config.vision_config.num_attention_heads
self.num_heads = num_heads
self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
self.linear = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout_rate)
self.layer_norm = nn.LayerNorm(hidden_size)
self.layer_norm_kv = nn.LayerNorm(hidden_size)
def forward(self, key_value_states, hidden_states, attn_mask=None):
"""
Forward pass of the AriaCrossAttention module.
Args:
key_value_states (`torch.Tensor`):
Input tensor for key and value.
hidden_states (`torch.Tensor`):
Input tensor for query.
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
torch.Tensor:
Output tensor after cross-attention.
"""
query = self.q_proj(self.layer_norm(hidden_states))
key_value_states = self.layer_norm_kv(key_value_states)
key = self.k_proj(key_value_states)
value = self.v_proj(key_value_states)
attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)
attn_output = self.dropout(self.linear(attn_output))
return attn_output
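# Minimal shape check; a SimpleNamespace stands in for the config since only the two vision
# fields are read in __init__ (hypothetical sizes).
import torch
from types import SimpleNamespace
config = SimpleNamespace(vision_config=SimpleNamespace(hidden_size=64, num_attention_heads=4))
xattn = AriaCrossAttention(config)
kv = torch.randn(2, 49, 64)       # (batch, num_patches, hidden_size)
queries = torch.randn(2, 16, 64)  # (batch, num_queries, hidden_size)
print(xattn(kv, queries).shape)   # torch.Size([2, 16, 64])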
|
class AriaCrossAttention(nn.Module):
'''
Aria Cross-Attention module.
Args:
config (`AriaConfig`):
The configuration to use.
'''
def __init__(self, config: AriaConfig, dropout_rate: float=0):
pass
def forward(self, key_value_states, hidden_states, attn_mask=None):
'''
Forward pass of the AriaCrossAttention module.
Args:
key_value_states (`torch.Tensor`):
Input tensor for key and value.
hidden_states (`torch.Tensor`):
Input tensor for query.
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
torch.Tensor:
Output tensor after cross-attention.
'''
pass
| 3
| 2
| 22
| 4
| 11
| 7
| 1
| 0.91
| 1
| 3
| 1
| 0
| 2
| 9
| 2
| 12
| 53
| 11
| 22
| 18
| 19
| 20
| 22
| 18
| 19
| 1
| 1
| 0
| 2
|
583
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaForConditionalGeneration
|
from ..llava.modeling_llava import LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast
from typing import Optional, Union
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging
import torch
from ...cache_utils import Cache
@auto_docstring(custom_intro='\n Aria model for conditional generation tasks.\n\n This model combines a vision tower, a multi-modal projector, and a language model\n to perform tasks that involve both image and text inputs.\n ')
class AriaForConditionalGeneration(LlavaForConditionalGeneration):
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):
return self.model.get_image_features(pixel_values=pixel_values, pixel_mask=pixel_mask, vision_feature_layer=vision_feature_layer)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, AriaCausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image
>>> # Note that passing the image URLs (instead of the actual PIL images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```"""
outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return AriaCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_mask=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)
if cache_position[0] == 0:
model_inputs['pixel_values'] = pixel_values
model_inputs['pixel_mask'] = pixel_mask
return model_inputs
|
@auto_docstring(custom_intro='\n Aria model for conditional generation tasks.\n\n This model combines a vision tower, a multi-modal projector, and a language model\n to perform tasks that involve both image and text inputs.\n ')
class AriaForConditionalGeneration(LlavaForConditionalGeneration):
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, AriaCausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image
>>> # Note that passing the image URLs (instead of the actual PIL images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_mask=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
pass
| 7
| 1
| 21
| 2
| 14
| 5
| 2
| 0.34
| 2
| 10
| 5
| 0
| 11
| 6
| 11
| 51
| 250
| 34
| 161
| 74
| 113
| 55
| 77
| 40
| 65
| 11
| 3
| 2
| 25
|
584
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaGroupedExpertsGemm
|
import torch
from torch import nn
class AriaGroupedExpertsGemm(nn.Module):
"""
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
groups (`int`):
Number of expert groups.
"""
def __init__(self, in_features, out_features, groups):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.groups = groups
self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))
def forward(self, input, tokens_per_expert):
"""
Perform grouped matrix multiplication.
Args:
input (`torch.Tensor`):
Input tensor of shape (num_tokens, in_features).
tokens_per_expert (`torch.Tensor`):
Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
return sequential_experts_gemm(input, self.weight, tokens_per_expert.cpu())
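The fallback `sequential_experts_gemm` is defined elsewhere in modular_aria.py; a minimal sketch of its semantics (illustrative, not the library implementation) helps read the forward pass above:

import torch

def sequential_experts_gemm_sketch(token_states, expert_weights, tokens_per_expert):
    # token_states:      (num_tokens, in_features), tokens pre-sorted by expert
    # expert_weights:    (num_experts, in_features, out_features)
    # tokens_per_expert: (num_experts,) number of tokens routed to each expert
    output = torch.zeros(
        token_states.shape[0], expert_weights.shape[2],
        dtype=token_states.dtype, device=token_states.device,
    )
    start = 0
    for expert_idx, end in enumerate(torch.cumsum(tokens_per_expert, dim=0).tolist()):
        # One dense GEMM per expert over its contiguous slice of tokens.
        output[start:end] = token_states[start:end] @ expert_weights[expert_idx]
        start = end
    return output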
|
class AriaGroupedExpertsGemm(nn.Module):
'''
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
groups (`int`):
Number of expert groups.
'''
def __init__(self, in_features, out_features, groups):
pass
def forward(self, input, tokens_per_expert):
'''
Perform grouped matrix multiplication.
Args:
input (`torch.Tensor`):
Input tensor of shape (num_tokens, in_features).
tokens_per_expert (`torch.Tensor`):
Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
'''
pass
| 3
| 2
| 12
| 1
| 6
| 5
| 1
| 1.85
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 42
| 5
| 13
| 7
| 10
| 24
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
585
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaGroupedExpertsMLP
|
from torch import nn
import torch
class AriaGroupedExpertsMLP(nn.Module):
"""
Grouped MLP module for Mixture of Experts.
Args:
config (`AriaTextConfig`):
Configuration object for the model.
"""
def __init__(self, config: AriaTextConfig) -> None:
super().__init__()
self.config = config
self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)
self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)
def forward(self, permuted_tokens, tokens_per_expert):
"""
Forward pass of the Grouped MLP.
Args:
permuted_tokens (torch.Tensor): Permuted input tokens.
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor after passing through the MLP.
"""
fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
projection, gate = torch.chunk(fc1_output, 2, dim=-1)
fc1_output = nn.functional.silu(projection) * gate
fc2_output = self.fc2(fc1_output, tokens_per_expert)
return fc2_output
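The `fc1`/chunk/SiLU pattern above is a SwiGLU-style gate; a short shape walk-through under illustrative dimensions:

import torch
from torch import nn

num_tokens, inter = 10, 16
fc1_output = torch.randn(num_tokens, inter * 2)        # fc1 emits 2x intermediate_size
projection, gate = torch.chunk(fc1_output, 2, dim=-1)  # two (num_tokens, inter) halves
gated = nn.functional.silu(projection) * gate          # elementwise gate, (num_tokens, inter)
# fc2 then maps (num_tokens, inter) back to (num_tokens, hidden_size).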
|
class AriaGroupedExpertsMLP(nn.Module):
'''
Grouped MLP module for Mixture of Experts.
Args:
config (`AriaTextConfig`):
Configuration object for the model.
'''
def __init__(self, config: AriaTextConfig) -> None:
pass
def forward(self, permuted_tokens, tokens_per_expert):
'''
Forward pass of the Grouped MLP.
Args:
permuted_tokens (torch.Tensor): Permuted input tokens.
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor after passing through the MLP.
'''
pass
| 3
| 2
| 11
| 1
| 6
| 4
| 1
| 1.17
| 1
| 3
| 2
| 0
| 2
| 3
| 2
| 12
| 31
| 5
| 12
| 9
| 9
| 14
| 12
| 9
| 9
| 1
| 1
| 0
| 2
|
586
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaImageProcessor
|
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from typing import Optional, Union
from ..llava_next.image_processing_llava_next import divide_to_patches
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, select_best_resolution
from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging
from collections.abc import Iterable
from ...image_transforms import PaddingMode, convert_to_rgb, pad, resize, to_channel_dimension_format
import numpy as np
class AriaImageProcessor(BaseImageProcessor):
"""
A vision processor for the Aria model that handles image preprocessing.
Initialize the AriaImageProcessor.
Args:
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to 980):
Maximum image size.
min_image_size (`int`, *optional*, defaults to 336):
Minimum image size.
split_resolutions (`list`, *optional*, defaults to a list of optimal resolutions as tuples):
The optimal resolutions for splitting the image.
split_image (`bool`, *optional*, defaults to `False`):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
The resampling filter to use if resizing the image.
"""
model_input_names = ['pixel_values', 'pixel_mask', 'num_crops']
def __init__(self, image_mean: Optional[list[float]]=None, image_std: Optional[list[float]]=None, max_image_size: int=980, min_image_size: int=336, split_resolutions: Optional[list[tuple[int, int]]]=None, split_image: Optional[bool]=False, do_convert_rgb: Optional[bool]=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: Optional[bool]=True, resample: PILImageResampling=PILImageResampling.BICUBIC, **kwargs):
super().__init__(**kwargs)
if image_mean is None:
image_mean = [0.5, 0.5, 0.5]
if image_std is None:
image_std = [0.5, 0.5, 0.5]
self.max_image_size = max_image_size
self.min_image_size = min_image_size
self.image_mean = image_mean
self.image_std = image_std
self.split_image = split_image
if split_resolutions is None:
split_resolutions = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (4, 1), (4, 2), (5, 1), (6, 1), (7, 1), (8, 1)]
split_resolutions = [(el[0] * 490, el[1] * 490) for el in split_resolutions]
self.split_resolutions = split_resolutions
self.do_convert_rgb = do_convert_rgb
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.resample = resample
def preprocess(self, images: Union[ImageInput, list[ImageInput]], image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, max_image_size: Optional[int]=None, min_image_size: Optional[int]=None, split_image: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, resample: Optional[PILImageResampling]=None, return_tensors: Optional[Union[str, TensorType]]='pt', data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Process a list of images.
Args:
images (ImageInput or list of ImageInput):
The input image or a list of images.
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
Maximum image size.
min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
Minimum image size.
split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
The resampling filter to use if resizing the image.
return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
The type of tensor to return.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
BatchFeature:
A BatchFeature object containing:
- 'pixel_values':
Tensor of processed image pixel values.
- 'pixel_mask':
Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
- True (1) values indicate pixels that belong to the original resized image.
- False (0) values indicate pixels that are part of the padding.
The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
- 'num_crops':
The maximum number of crops across all images.
"""
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
max_image_size = max_image_size if max_image_size is not None else self.max_image_size
min_image_size = min_image_size if min_image_size is not None else self.min_image_size
split_image = split_image if split_image is not None else self.split_image
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
resample = resample if resample is not None else self.resample
if max_image_size not in [490, 980]:
raise ValueError('max_image_size must be either 490 or 980')
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
pixel_values = []
pixel_masks = []
num_crops = None
for image in images:
if split_image:
crop_images = self.get_image_patches(image, self.split_resolutions, max_image_size, resample, data_format=input_data_format, input_data_format=input_data_format)
else:
crop_images = [image]
if num_crops is None or len(crop_images) > num_crops:
num_crops = len(crop_images)
for crop_image in crop_images:
h, w = get_image_size(crop_image)
scale = max_image_size / max(h, w)
if w >= h:
new_size = (max(int(h * scale), min_image_size), max_image_size)
else:
new_size = (max_image_size, max(int(w * scale), min_image_size))
crop_image_resized = resize(crop_image, new_size, resample=resample, data_format=input_data_format, input_data_format=input_data_format)
padding_bottom, padding_right = (max_image_size - new_size[0], max_image_size - new_size[1])
crop_image_padded = pad(crop_image_resized, ((0, padding_bottom), (0, padding_right)), data_format=input_data_format, input_data_format=input_data_format)
pixel_mask = np.zeros((max_image_size, max_image_size), dtype=bool)
pixel_mask[:new_size[0], :new_size[1]] = 1
pixel_masks.append(pixel_mask)
if do_rescale:
crop_image_padded = self.rescale(image=crop_image_padded, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
crop_image_padded = self.normalize(crop_image_padded, self.image_mean, self.image_std, data_format=input_data_format, input_data_format=input_data_format)
crop_image_padded = to_channel_dimension_format(crop_image_padded, data_format, input_data_format) if data_format is not None else crop_image_padded
pixel_values.append(crop_image_padded)
return BatchFeature(data={'pixel_values': np.stack(pixel_values, axis=0), 'pixel_mask': np.stack(pixel_masks, axis=0), 'num_crops': num_crops}, tensor_type=return_tensors)
def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:
"""
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image (np.array):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
np.array: The resized image.
"""
new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
return resized_image
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
original_height, original_width = original_resolution
target_height, target_width = target_resolution
paste_x, r_x = divmod(target_width - original_width, 2)
paste_y, r_y = divmod(target_height - original_height, 2)
return ((paste_y, paste_y + r_y), (paste_x, paste_x + r_x))
def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array:
"""
Pad an image to a target resolution while maintaining aspect ratio.
"""
new_resolution = get_patch_output_size(image, target_resolution, input_data_format)
padding = self._get_padding_size(new_resolution, target_resolution)
padded_image = self.pad(image, padding=padding)
return padded_image
def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
dimension or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
as input.
Args:
image (`np.ndarray`):
The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
Padding to apply to the edges of the height, width axes. Can be one of three formats:
- `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
- `((before, after),)` yields same before and after pad for height and width.
- `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image.
"""
if isinstance(padding, int) or len(padding) != 4:
return pad(image, padding, mode, constant_values, data_format, input_data_format)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
padding_mode_mapping = {PaddingMode.CONSTANT: 'constant', PaddingMode.REFLECT: 'reflect', PaddingMode.REPLICATE: 'edge', PaddingMode.SYMMETRIC: 'symmetric'}
image = np.pad(image, padding, mode=padding_mode_mapping[mode], constant_values=constant_values)
image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
return image
def get_image_patches(self, image: np.ndarray, grid_pinpoints: list[tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]:
"""
Process an image with variable resolutions by dividing it into patches.
Args:
image (`np.array`):
The input image to be processed.
grid_pinpoints (list[tuple[int, int]]):
A list of possible resolutions as tuples.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
`list[np.array]`: A list of NumPy arrays containing the processed image patches.
"""
if not isinstance(grid_pinpoints, list):
raise TypeError('grid_pinpoints must be a list of possible resolutions.')
possible_resolutions = grid_pinpoints
image_size = get_image_size(image, channel_dim=input_data_format)
best_resolution = select_best_resolution(image_size, possible_resolutions)
resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format)
padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches]
return patches
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
split_image = images_kwargs.get('split_image', self.split_image)
max_image_size = images_kwargs.get('max_image_size', self.max_image_size)
resized_height, resized_width = select_best_resolution((height, width), self.split_resolutions)
num_patches = 1 if not split_image else resized_height // max_image_size * resized_width // max_image_size
return num_patches
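A hedged usage sketch of `preprocess` above, assuming the class is exported from `transformers` and using a synthetic placeholder image:

import numpy as np
from transformers import AriaImageProcessor  # assumed public export

processor = AriaImageProcessor(split_image=True)
image = np.zeros((600, 800, 3), dtype=np.uint8)  # any HWC image stands in here
batch = processor.preprocess(image, max_image_size=980, return_tensors="pt")
# batch["pixel_values"]: (num_crops, 3, 980, 980) resized, padded crops
# batch["pixel_mask"]:   (num_crops, 980, 980), True where real image content is
# batch["num_crops"]:    the maximum crop count across the input images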
|
class AriaImageProcessor(BaseImageProcessor):
'''
A vision processor for the Aria model that handles image preprocessing.
Initialize the AriaImageProcessor.
Args:
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to 980):
Maximum image size.
min_image_size (`int`, *optional*, defaults to 336):
Minimum image size.
split_resolutions (`list`, *optional*, defaults to a list of optimal resolutions as tuples):
The optimal resolutions for splitting the image.
split_image (`bool`, *optional*, defaults to `False`):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
The resampling filter to use if resizing the image.
'''
def __init__(self, image_mean: Optional[list[float]]=None, image_std: Optional[list[float]]=None, max_image_size: int=980, min_image_size: int=336, split_resolutions: Optional[list[tuple[int, int]]]=None, split_image: Optional[bool]=False, do_convert_rgb: Optional[bool]=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: Optional[bool]=True, resample: PILImageResampling=PILImageResampling.BICUBIC, **kwargs):
pass
def preprocess(self, images: Union[ImageInput, list[ImageInput]], image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, max_image_size: Optional[int]=None, min_image_size: Optional[int]=None, split_image: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, resample: Optional[PILImageResampling]=None, return_tensors: Optional[Union[str, TensorType]]='pt', data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Process a list of images.
Args:
images (ImageInput or list of ImageInput):
The input image or a list of images.
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
Maximum image size.
min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
Minimum image size.
split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
The resampling filter to use if resizing the image.
return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
The type of tensor to return.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
BatchFeature:
A BatchFeature object containing:
- 'pixel_values':
Tensor of processed image pixel values.
- 'pixel_mask':
Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
- True (1) values indicate pixels that belong to the original resized image.
- False (0) values indicate pixels that are part of the padding.
The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
- 'num_crops':
The maximum number of crops across all images.
'''
pass
def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:
'''
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image (np.array):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
np.array: The resized image.
'''
pass
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
pass
def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array:
'''
Pad an image to a target resolution while maintaining aspect ratio.
'''
pass
def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
dimension or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
as input.
Args:
image (`np.ndarray`):
The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
Padding to apply to the edges of the height, width axes. Can be one of three formats:
- `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
- `((before, after),)` yields same before and after pad for height and width.
- `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image.
'''
pass
def get_image_patches(self, image: np.ndarray, grid_pinpoints: list[tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]:
'''
Process an image with variable resolutions by dividing it into patches.
Args:
image (`np.array`):
The input image to be processed.
grid_pinpoints (list[tuple[int, int]]):
A list of possible resolutions as tuples.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
`list[np.array]`: A list of NumPy arrays containing the processed image patches.
'''
pass
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
'''
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
'''
pass
| 9
| 7
| 60
| 6
| 33
| 22
| 5
| 0.76
| 1
| 13
| 4
| 0
| 6
| 9
| 6
| 26
| 392
| 44
| 199
| 88
| 146
| 152
| 96
| 42
| 89
| 20
| 3
| 3
| 32
|
587
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaPreTrainedModel
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
from torch import nn
from ...modeling_utils import PreTrainedModel
class AriaPreTrainedModel(LlamaPreTrainedModel):
config: AriaConfig
base_model_prefix = ''
_can_compile_fullgraph = False
_supports_attention_backend = True
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, AriaProjector):
nn.init.trunc_normal_(module.query, std=self.config.initializer_range)
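For context, the extra branch above only changes how the projector's learned query bank is initialized; a standalone sketch of the same initializer call, with illustrative shapes:

import torch
from torch import nn

query = nn.Parameter(torch.zeros(256, 1152))  # illustrative (max queries, vision hidden)
nn.init.trunc_normal_(query, std=0.02)        # std mirrors config.initializer_range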
|
class AriaPreTrainedModel(LlamaPreTrainedModel):
def _init_weights(self, module):
pass
| 2
| 0
| 12
| 0
| 12
| 0
| 6
| 0.07
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 2
| 16
| 1
| 15
| 5
| 13
| 1
| 13
| 5
| 11
| 6
| 2
| 2
| 6
|
588
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaProcessor
|
import numpy as np
from ..auto import CONFIG_MAPPING, AutoConfig, AutoTokenizer
from typing import Optional, Union
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, select_best_resolution
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
class AriaProcessor(ProcessorMixin):
"""
AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the LLaMA slow tokenizer.
Args:
image_processor (`AriaImageProcessor`, *optional*):
The AriaImageProcessor to use for image preprocessing.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
size_conversion (`Dict`, *optional*):
A dictionary indicating size conversions for images.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AriaImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor=None, tokenizer: Union[AutoTokenizer, str]=None, chat_template: Optional[str]=None, size_conversion: Optional[dict[Union[float, int], int]]=None):
if size_conversion is None:
size_conversion = {490: 128, 980: 256}
self.size_conversion = {int(k): v for k, v in size_conversion.items()}
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
if tokenizer is not None and tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.unk_token
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], images: Optional[ImageInput]=None, audio=None, videos=None, **kwargs: Unpack[AriaProcessorKwargs]) -> BatchFeature:
"""
Main method to prepare one or several sequence(s) and image(s) for the model.
Args:
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(AriaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise TypeError('Invalid input text. Please provide a string, or a list of strings')
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]]
prompt_strings = []
num_crops = image_inputs.pop('num_crops') * tokens_per_image
for sample in text:
sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops)
prompt_strings.append(sample)
else:
image_inputs = {}
prompt_strings = text
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image'])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs['input_ids'])
mm_token_type_ids = np.zeros_like(text_inputs['input_ids'])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = AriaProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
max_size = images_kwargs.get('max_image_size', None) or self.image_processor.max_image_size
num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]
num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches]
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
image_processor_input_names = [name for name in image_processor_input_names if name != 'num_crops']
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
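A hedged end-to-end sketch of `__call__` above; the checkpoint name comes from the docstrings in this file and the image is a placeholder:

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
image = Image.new("RGB", (800, 600))
text = f"{processor.image_token} Describe this image."
inputs = processor(text=text, images=image, return_tensors="pt")
# Internally, the single image token in `text` is expanded to
# num_crops * tokens_per_image copies before tokenization (see __call__).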
|
class AriaProcessor(ProcessorMixin):
'''
AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the LLaMA slow tokenizer.
Args:
image_processor (`AriaImageProcessor`, *optional*):
The AriaImageProcessor to use for image preprocessing.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
size_conversion (`Dict`, *optional*):
A dictionary indicating size conversions for images.
'''
def __init__(self, image_processor=None, tokenizer: Union[AutoTokenizer, str]=None, chat_template: Optional[str]=None, size_conversion: Optional[dict[Union[float, int], int]]=None):
pass
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], images: Optional[ImageInput]=None, audio=None, videos=None, **kwargs: Unpack[AriaProcessorKwargs]) -> BatchFeature:
'''
Main method to prepare one or several sequence(s) and image(s) for the model.
Args:
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
'''
pass
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
'''
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
'''
pass
@property
def model_input_names(self):
pass
| 6
| 3
| 19
| 2
| 11
| 6
| 2
| 0.63
| 1
| 10
| 3
| 0
| 5
| 1
| 5
| 22
| 118
| 15
| 63
| 34
| 43
| 40
| 37
| 20
| 31
| 5
| 2
| 2
| 11
|
589
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaProcessorKwargs
|
from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
class AriaProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {'max_image_size': 980, 'split_image': False}, 'return_tensors': TensorType.PYTORCH}
|
class AriaProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0
| 11
| 2
| 10
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
590
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaProjector
|
import torch
from typing import Optional, Union
from torch import nn
class AriaProjector(nn.Module):
"""
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
"""
def __init__(self, config: AriaConfig):
super().__init__()
self.patch_to_query_dict = config.projector_patch_to_query_dict
self.in_features = config.vision_config.hidden_size
self.num_heads = config.vision_config.num_attention_heads
self.kv_dim = config.vision_config.hidden_size
self.hidden_features = config.text_config.hidden_size
self.output_dim = config.text_config.hidden_size
self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))
self.cross_attn = AriaCrossAttention(config)
self.layer_norm = nn.LayerNorm(self.in_features)
self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)
def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):
"""
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
attn_mask (`torch.Tensor`, *optional*, defaults to `None`):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
"""
batch_size, num_patches = (key_value_states.shape[0], key_value_states.shape[1])
if num_patches not in self.patch_to_query_dict:
raise KeyError(f'Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}.')
query_num = self.patch_to_query_dict[num_patches]
queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)
if attn_mask is not None:
attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)
attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)
attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)
out = self.feed_forward(self.layer_norm(attention_out))
return out
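A toy shape walk-through of the resampling idea implemented above (plain tensors, not the library classes; the patch-to-query mapping is illustrative):

import torch

batch, num_patches, kv_dim = 2, 1225, 1152
patch_to_query = {1225: 128, 4900: 256}   # illustrative mapping
query_bank = torch.zeros(256, kv_dim)     # plays the role of the learned `query`
query_num = patch_to_query[num_patches]
queries = query_bank[:query_num].unsqueeze(0).repeat(batch, 1, 1)
# Cross-attention reads (batch, 1225, kv_dim) patches through (batch, 128, kv_dim)
# queries; the feed-forward then projects the result to the text hidden size.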
|
class AriaProjector(nn.Module):
'''
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
'''
def __init__(self, config: AriaConfig):
pass
def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):
'''
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
attn_mask (`torch.Tensor`, *optional*, defaults to `None`):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
'''
pass
| 3
| 2
| 26
| 6
| 15
| 5
| 2
| 0.57
| 1
| 6
| 3
| 0
| 2
| 10
| 2
| 12
| 63
| 16
| 30
| 21
| 24
| 17
| 25
| 18
| 22
| 3
| 1
| 1
| 4
|
591
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaProjectorMLP
|
from ...activations import ACT2FN
from torch import nn
class AriaProjectorMLP(nn.Module):
"""
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
"""
def __init__(self, in_features, hidden_features, output_dim):
super().__init__()
self.linear_in = nn.Linear(in_features, hidden_features, bias=False)
self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)
self.act = ACT2FN['gelu_new']
def forward(self, hidden_states):
hidden_states = self.act(self.linear_in(hidden_states))
hidden_states = self.linear_out(hidden_states)
return hidden_states
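Given the class definition above (and `ACT2FN` in scope), a one-line shape check with illustrative dimensions:

import torch

mlp = AriaProjectorMLP(in_features=1152, hidden_features=4096, output_dim=4096)
out = mlp(torch.randn(2, 128, 1152))  # -> torch.Size([2, 128, 4096])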
|
class AriaProjectorMLP(nn.Module):
'''
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
'''
def __init__(self, in_features, hidden_features, output_dim):
pass
def forward(self, hidden_states):
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 23
| 3
| 10
| 6
| 7
| 10
| 10
| 6
| 7
| 1
| 1
| 0
| 2
|
592
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaSharedExpertsMLP
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
class AriaSharedExpertsMLP(LlamaMLP):
"""
Shared Expert MLP for shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
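A worked example of the width reconfiguration, using the defaults documented for `AriaTextConfig`:

# With intermediate_size=4096 and moe_num_shared_experts=2:
intermediate_size, moe_num_shared_experts = 4096, 2
shared_expert_width = intermediate_size * moe_num_shared_experts  # 8192
# i.e. the shared-expert MLP is twice as wide as a single routed expert.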
|
class AriaSharedExpertsMLP(LlamaMLP):
'''
Shared Expert MLP for shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
'''
def __init__(self, config: AriaTextConfig):
pass
| 2
| 1
| 3
| 0
| 3
| 0
| 1
| 1.75
| 1
| 2
| 1
| 0
| 1
| 1
| 1
| 13
| 14
| 3
| 4
| 3
| 2
| 7
| 4
| 3
| 2
| 1
| 2
| 0
| 1
|
593
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextConfig
|
from ..llama.configuration_llama import LlamaConfig
class AriaTextConfig(LlamaConfig):
"""
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
The size of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 2):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
moe_num_experts (`int`, *optional*, defaults to 8):
The number of experts in the MoE layer.
moe_topk (`int`, *optional*, defaults to 2):
The number of top experts to route to for each token.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of shared experts.
"""
model_type = 'aria_text'
base_config_key = 'text_config'
def __init__(self, intermediate_size: int=4096, moe_num_experts: int=8, moe_topk: int=2, moe_num_shared_experts: int=2, pad_token_id=2, **super_kwargs):
super().__init__(pad_token_id=pad_token_id, **super_kwargs)
self.intermediate_size = intermediate_size
self.moe_num_experts = moe_num_experts
self.moe_topk = moe_topk
self.moe_num_shared_experts = moe_num_shared_experts
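A hedged construction sketch (assumes `AriaTextConfig` is exported from `transformers`):

from transformers import AriaTextConfig

config = AriaTextConfig(moe_num_experts=16, moe_topk=4)
assert config.intermediate_size == 4096   # default defined by this subclass
assert config.moe_num_shared_experts == 2
assert config.pad_token_id == 2           # default overridden relative to Llama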
|
class AriaTextConfig(LlamaConfig):
'''
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
The size of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 2):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work with a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied to the attention
computation. If unspecified, it defaults to the value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (shorter than
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads, divided by 2.
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (longer than
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads, divided by 2.
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to the low-frequency components of the RoPE.
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to the high-frequency components of the RoPE.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
moe_num_experts (`int`, *optional*, defaults to 8):
The number of experts in the MoE layer.
moe_topk (`int`, *optional*, defaults to 2):
The number of top experts to route to for each token.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of shared experts.
'''
def __init__(self, intermediate_size: int=4096, moe_num_experts: int=8, moe_topk: int=2, moe_num_shared_experts: int=2, pad_token_id=2, **super_kwargs):
pass
| 2
| 1
| 14
| 0
| 14
| 0
| 1
| 6.12
| 1
| 2
| 0
| 0
| 1
| 4
| 1
| 2
| 124
| 3
| 17
| 16
| 7
| 104
| 9
| 8
| 7
| 1
| 2
| 0
| 1
|
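To make the configuration surface above concrete, here is a minimal sketch of instantiating the config with MoE and RoPE-scaling options; it assumes a transformers build that ships the Aria model, and the hyperparameter values are illustrative rather than recommended.
from transformers import AriaTextConfig  # assumes a transformers release that includes Aria

# Small illustrative config: MoE routing knobs plus a linear RoPE scaling
# that doubles the usable context relative to the pre-trained length.
config = AriaTextConfig(
    vocab_size=32000,
    hidden_size=1024,
    num_hidden_layers=4,
    num_attention_heads=16,
    intermediate_size=4096,
    moe_num_experts=8,
    moe_topk=2,
    moe_num_shared_experts=2,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)
print(config.moe_topk, config.rope_scaling["factor"])  # 2 2.0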
594
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextDecoderLayer
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
class AriaTextDecoderLayer(LlamaDecoderLayer):
"""
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and a Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.mlp = AriaTextMoELayer(config)
|
class AriaTextDecoderLayer(LlamaDecoderLayer):
'''
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and a Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
'''
def __init__(self, config: AriaTextConfig, layer_idx: int):
pass
| 2
| 1
| 3
| 0
| 3
| 0
| 1
| 2.25
| 1
| 4
| 2
| 0
| 1
| 1
| 1
| 13
| 16
| 3
| 4
| 3
| 2
| 9
| 4
| 3
| 2
| 1
| 2
| 0
| 1
|
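The decoder layer above shows the modular pattern of inheriting a complete layer and swapping only its feed-forward block. A self-contained sketch of that pattern with stand-in modules (not the real Llama classes):
import torch
from torch import nn

class BaseDecoderLayer(nn.Module):
    """Stand-in for LlamaDecoderLayer: attention plus a dense feed-forward."""
    def __init__(self, hidden_size: int):
        super().__init__()
        self.attn = nn.MultiheadAttention(hidden_size, num_heads=4, batch_first=True)
        self.mlp = nn.Linear(hidden_size, hidden_size)

    def forward(self, x):
        x = x + self.attn(x, x, x)[0]
        return x + self.mlp(x)

class MoEDecoderLayer(BaseDecoderLayer):
    """Swap the feed-forward block, as AriaTextDecoderLayer does with its MoE mlp."""
    def __init__(self, hidden_size: int):
        super().__init__(hidden_size)
        self.mlp = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.GELU())

layer = MoEDecoderLayer(64)
print(layer(torch.randn(2, 8, 64)).shape)  # torch.Size([2, 8, 64])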
595
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextForCausalLM
|
from torch import nn
from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
class AriaTextForCausalLM(AriaTextPreTrainedModel, LlamaForCausalLM):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.model = AriaTextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@auto_docstring
def forward(self, **super_kwargs):
return super().forward(**super_kwargs)
|
class AriaTextForCausalLM(AriaTextPreTrainedModel, LlamaForCausalLM):
def __init__(self, config: AriaTextConfig):
pass
@auto_docstring
def forward(self, **super_kwargs):
pass
| 4
| 0
| 8
| 1
| 6
| 1
| 1
| 1
| 2
| 3
| 2
| 0
| 1
| 3
| 1
| 140
| 23
| 5
| 9
| 7
| 7
| 9
| 9
| 7
| 7
| 1
| 3
| 0
| 1
|
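As a usage sketch for the causal-LM head above: the snippet below builds a tiny randomly initialized model just to exercise the forward API; it assumes the Aria classes are importable from transformers, and real weights live at rhymes-ai/Aria.
import torch
from transformers import AriaTextConfig, AriaTextForCausalLM  # assumes Aria is available

config = AriaTextConfig(
    vocab_size=128, hidden_size=64, num_hidden_layers=2,
    num_attention_heads=4, num_key_value_heads=4, intermediate_size=128,
    moe_num_experts=4, moe_topk=2, moe_num_shared_experts=1,
)
model = AriaTextForCausalLM(config).eval()  # random init, illustration only
input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    logits = model(input_ids=input_ids).logits
print(logits.shape)  # torch.Size([1, 8, 128])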
596
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextMoELayer
|
import torch
from torch import nn
class AriaTextMoELayer(nn.Module):
"""
Aria Text Mixture of Experts (MoE) Layer.
This layer applies a gating mechanism to route input tokens to different experts.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__()
self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False)
self.experts = AriaGroupedExpertsMLP(config)
self.shared_experts = AriaSharedExpertsMLP(config)
self.config = config
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the MoE Layer.
Args:
hidden_states (`torch.Tensor`):
Input tensor of shape (batch_size, sequence_length, hidden_size).
Returns:
torch.Tensor: Output tensor after passing through the MoE layer.
Process:
1. Route tokens to experts using the router.
2. Permute tokens based on routing decisions.
3. Process tokens through experts.
4. Unpermute and combine expert outputs.
5. Add shared expert output to the final result.
"""
original_shape = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
logits = self.router(hidden_states)
top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
original_dtype = top_indices.dtype
tokens_per_expert = torch.histc(top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1).to(original_dtype)
indices = top_indices
flatten_indices = indices.view(-1)
sorted_indices = torch.argsort(flatten_indices)
permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk)
expert_output = self.experts(permuted_tokens, tokens_per_expert)
unpermuted_tokens = torch.zeros((scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device)
unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape)
shared_expert_output = self.shared_experts(hidden_states.view(original_shape))
return output + shared_expert_output
|
class AriaTextMoELayer(nn.Module):
'''
Aria Text Mixture of Experts (MoE) Layer.
This layer applies a gating mechanism to route input tokens to different experts.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
'''
def __init__(self, config: AriaTextConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
'''
Forward pass of the MoE Layer.
Args:
hidden_states (`torch.Tensor`):
Input tensor of shape (batch_size, sequence_length, hidden_size).
Returns:
torch.Tensor: Output tensor after passing through the MoE layer.
Process:
1. Route tokens to experts using the router.
2. Permute tokens based on routing decisions.
3. Process tokens through experts.
4. Unpermute and combine expert outputs.
5. Add shared expert output to the final result.
'''
pass
| 3
| 2
| 33
| 6
| 17
| 10
| 1
| 0.74
| 1
| 5
| 3
| 0
| 2
| 4
| 2
| 12
| 77
| 16
| 35
| 21
| 32
| 26
| 26
| 21
| 23
| 1
| 1
| 0
| 2
|
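The five steps listed in the forward docstring can be traced with plain tensors. Below is a minimal sketch of the routing bookkeeping with the experts replaced by the identity, so only the top-k selection and permute/unpermute logic is exercised; the sizes are arbitrary.
import torch
from torch import nn

torch.manual_seed(0)
num_experts, topk, hidden = 4, 2, 8
tokens = torch.randn(5, hidden)                      # 5 flattened tokens
router = nn.Linear(hidden, num_experts, bias=False)

# 1. Route: pick the top-k experts per token and softmax their logits.
top_logits, top_idx = torch.topk(router(tokens), k=topk, dim=1)
scores = torch.softmax(top_logits, dim=-1)           # (5, topk)

# 2. Permute: sort token copies by expert id so each expert sees a block.
sorted_idx = torch.argsort(top_idx.view(-1))
permuted = tokens.index_select(0, sorted_idx // topk)

# 3. Experts: identity here; the real layer runs grouped expert MLPs.
expert_out = permuted

# 4. Unpermute and mix the expert outputs by their routing scores.
unpermuted = torch.zeros_like(expert_out)
unpermuted.index_copy_(0, sorted_idx, expert_out)
mixed = (unpermuted.view(-1, topk, hidden) * scores.unsqueeze(-1)).sum(dim=1)
print(mixed.shape)  # torch.Size([5, 8]) -- step 5 would add the shared-expert output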
597
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextModel
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
from torch import nn
class AriaTextModel(LlamaModel):
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.layers = nn.ModuleList([AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
self.post_init()
|
class AriaTextModel(LlamaModel):
def __init__(self, config: AriaTextConfig):
pass
| 2
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 4
| 2
| 0
| 1
| 2
| 1
| 8
| 8
| 0
| 8
| 4
| 6
| 0
| 6
| 4
| 4
| 1
| 3
| 0
| 1
|
598
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextPreTrainedModel
|
from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_utils import PreTrainedModel
@auto_docstring
class AriaTextPreTrainedModel(PreTrainedModel):
config: AriaTextConfig
base_model_prefix = 'model'
_no_split_modules = ['AriaTextDecoderLayer', 'AriaGroupedExpertsGemm']
supports_gradient_checkpointing = True
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': AriaTextDecoderLayer, 'attentions': AriaTextAttention}
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaGroupedExpertsGemm):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
@auto_docstring
class AriaTextPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 16
| 0
| 16
| 0
| 8
| 0.12
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 130
| 30
| 2
| 25
| 11
| 23
| 3
| 22
| 11
| 20
| 8
| 2
| 2
| 8
|
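The _init_weights override above dispatches on module type so that only the grouped-experts weight receives the custom normal initialization. A minimal sketch of that pattern with a stand-in module:
import torch
from torch import nn

class GroupedGemm(nn.Module):
    """Stand-in for AriaGroupedExpertsGemm: holds a raw weight Parameter."""
    def __init__(self, n: int):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(n, n))

def init_weights(module, std: float = 0.02):
    # Only the custom module type gets the normal_ fill; other modules keep
    # whatever their default (or inherited) initialization produced.
    if isinstance(module, GroupedGemm):
        module.weight.data.normal_(mean=0.0, std=std)

model = nn.Sequential(GroupedGemm(4), nn.Linear(4, 4))
model.apply(init_weights)   # recursively visits every submodule
print(float(model[0].weight.std()))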
599
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/modular_aria.py
|
transformers.models.aria.modular_aria.AriaTextRMSNorm
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
class AriaTextRMSNorm(LlamaRMSNorm):
pass
|
class AriaTextRMSNorm(LlamaRMSNorm):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
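AriaTextRMSNorm is a pure pass-through subclass of LlamaRMSNorm. For reference, a minimal sketch of the RMSNorm computation it inherits (omitting the float32 upcast the library version performs):
import torch
from torch import nn

class RMSNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, x):
        # Scale each vector by the reciprocal of its root mean square.
        variance = x.pow(2).mean(-1, keepdim=True)
        return self.weight * x * torch.rsqrt(variance + self.eps)

print(RMSNorm(8)(torch.randn(2, 8)).shape)  # torch.Size([2, 8])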