Dataset columns (dtype and observed min/max; string columns list min/max length; class_skeleton may be null):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length), nullable | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
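Each row pairs one Python class (human_written_code) with a signature-only skeleton (class_skeleton) and per-class static metrics. As a minimal sketch of loading and filtering such a dataset with the `datasets` library, assuming it is published on the Hugging Face Hub (the identifier below is a placeholder, since the real one is not shown here):

```python
from datasets import load_dataset

# Placeholder id: substitute the actual Hub identifier of this dataset.
ds = load_dataset("org/python-class-metrics", split="train")

row = ds[0]
print(row["class_name"], row["CountLineCode"], row["MaxCyclomatic"])

# Example: keep only classes with non-trivial control flow.
complex_classes = ds.filter(lambda r: r["SumCyclomatic"] >= 10)
print(len(complex_classes))
```

The sample rows below are shown one labeled block per record.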
id: 5,600
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
class_name: transformers.models.tapas.modeling_tapas.TapasPredictionHeadTransform

human_written_code:
from ...activations import ACT2FN
import torch
from torch import nn
class TapasPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class_skeleton:
class TapasPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=15, CountLineBlank=1, CountLineCode=14, CountLineCodeDecl=6, CountLineCodeExe=11, CountLineComment=0, CountStmt=13, CountStmtDecl=6, CountStmtExe=10, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
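As row 5,600 illustrates, class_skeleton keeps the class and method signatures, decorators and docstrings from human_written_code and replaces each body with pass. Purely as an illustration (this is not necessarily the pipeline used to build the dataset), such a skeleton could be derived with Python's ast module:

```python
import ast

def class_skeleton(source: str, class_name: str) -> str:
    """Keep signatures, decorators and docstrings; replace method bodies with `pass`."""
    module = ast.parse(source)
    for node in ast.walk(module):
        if not (isinstance(node, ast.ClassDef) and node.name == class_name):
            continue
        kept = []
        for child in node.body:
            if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef)):
                # Keep the method docstring (if any) and drop the rest of the body.
                doc = child.body[0] if ast.get_docstring(child) else None
                child.body = ([doc] if doc else []) + [ast.Pass()]
                kept.append(child)
            elif isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant):
                kept.append(child)  # class-level docstring
        node.body = kept or [ast.Pass()]
        return ast.unparse(node)  # Python 3.9+
    raise ValueError(f"class {class_name!r} not found in source")
```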

id: 5,601
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
class_name: transformers.models.tapas.modeling_tapas.TapasSelfAttention

human_written_code:
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils.deprecation import deprecate_kwarg
import math
from torch import nn
class TapasSelfAttention(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, past_key_values=None, output_attentions=False, cache_position=None):
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
is_updated = False
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_layer = curr_past_key_value.layers[self.layer_idx].keys
value_layer = curr_past_key_value.layers[self.layer_idx].values
else:
key_layer = self.key(current_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(current_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_values,)
return outputs
class_skeleton:
class TapasSelfAttention(nn.Module):
def __init__(self, config, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, past_key_values=None, output_attentions=False, cache_position=None):
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=30, AvgCountLineBlank=5, AvgCountLineCode=22, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=8, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=93, CountLineBlank=16, CountLineCode=67, CountLineCodeDecl=32, CountLineCodeExe=54, CountLineComment=10, CountStmt=52, CountStmtDecl=23, CountStmtExe=48, MaxCyclomatic=9, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=12

id: 5,602
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
class_name: transformers.models.tapas.modeling_tapas.TapasSelfOutput

human_written_code:
import torch
from torch import nn
class TapasSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class_skeleton:
class TapasSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=12, CountLineBlank=1, CountLineCode=11, CountLineCodeDecl=6, CountLineCodeExe=8, CountLineComment=0, CountStmt=11, CountStmtDecl=6, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 5,603
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.BasicTokenizer

human_written_code:
import unicodedata
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
do_split_on_punc (`bool`, *optional*, defaults to `True`):
In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
the full context of the words, such as contractions.
"""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
self.do_split_on_punc = do_split_on_punc
def tokenize(self, text, never_split=None):
"""
Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
Args:
never_split (`List[str]`, *optional*)
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
[`PreTrainedTokenizer.tokenize`]) List of token not to split.
"""
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
unicode_normalized_text = unicodedata.normalize('NFC', text)
orig_tokens = whitespace_tokenize(unicode_normalized_text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == 'Mn':
continue
output.append(char)
return ''.join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if not self.do_split_on_punc or (never_split is not None and text in never_split):
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return [''.join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103):
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 65533 or _is_control(char):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output)
class_skeleton:
class BasicTokenizer:
'''
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
do_split_on_punc (`bool`, *optional*, defaults to `True`):
In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
the full context of the words, such as contractions.
'''
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True):
pass
def tokenize(self, text, never_split=None):
'''
Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
Args:
never_split (`List[str]`, *optional*)
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
[`PreTrainedTokenizer.tokenize`]) List of token not to split.
'''
pass
def _run_strip_accents(self, text):
'''Strips accents from a piece of text.'''
pass
def _run_split_on_punc(self, text, never_split=None):
'''Splits punctuation on a piece of text.'''
pass
def _tokenize_chinese_chars(self, text):
'''Adds whitespace around any CJK character.'''
pass
def _is_chinese_char(self, cp):
'''Checks whether CP is the codepoint of a CJK character.'''
pass
def _clean_text(self, text):
'''Performs invalid character removal and whitespace cleanup on text.'''
pass
metrics: total_program_units=8, total_doc_str=7, AvgCountLine=19, AvgCountLineBlank=1, AvgCountLineCode=14, AvgCountLineComment=5, AvgCyclomatic=4, CommentToCodeRatio=0.55, CountClassBase=0, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=5, CountDeclMethod=7, CountDeclMethodAll=7, CountLine=159, CountLineBlank=14, CountLineCode=98, CountLineCodeDecl=39, CountLineCodeExe=83, CountLineComment=54, CountStmt=78, CountStmtDecl=32, CountStmtExe=70, MaxCyclomatic=8, MaxInheritanceTree=0, MaxNesting=4, SumCyclomatic=27
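For a quick check of the behaviour documented in the BasicTokenizer docstring of row 5,603 (lower-casing, accent stripping, punctuation splitting), the recorded class can be exercised directly. A minimal sketch, assuming an installed transformers version that still ships this module:

```python
from transformers.models.tapas.tokenization_tapas import BasicTokenizer

tok = BasicTokenizer(do_lower_case=True)
# Lower-cases, strips accents and splits punctuation into separate tokens,
# yielding roughly ['hello', ',', 'world', '!'].
print(tok.tokenize("Héllo, World!"))
```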

id: 5,604
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.Cell

human_written_code:
from typing import Callable, Optional, Union
from dataclasses import dataclass
@dataclass
class Cell:
text: str
numeric_value: Optional[NumericValue] = None
class_skeleton:
@dataclass
class Cell:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=2, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=2, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 5,605
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.Date

human_written_code:
from typing import Callable, Optional, Union
from dataclasses import dataclass
@dataclass
class Date:
year: Optional[int] = None
month: Optional[int] = None
day: Optional[int] = None
class_skeleton:
@dataclass
class Date:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=4, CountLineBlank=0, CountLineCode=4, CountLineCodeDecl=4, CountLineCodeExe=3, CountLineComment=0, CountStmt=4, CountStmtDecl=4, CountStmtExe=3, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 5,606
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.NumericValue

human_written_code:
from typing import Callable, Optional, Union
from dataclasses import dataclass
@dataclass
class NumericValue:
float_value: Optional[float] = None
date: Optional[Date] = None
class_skeleton:
@dataclass
class NumericValue:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=3, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=3, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 5,607
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.NumericValueSpan

human_written_code:
from typing import Callable, Optional, Union
from dataclasses import dataclass
@dataclass
class NumericValueSpan:
begin_index: Optional[int] = None
end_index: Optional[int] = None
values: list[NumericValue] = None
class_skeleton:
@dataclass
class NumericValueSpan:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=4, CountLineBlank=0, CountLineCode=4, CountLineCodeDecl=4, CountLineCodeExe=3, CountLineComment=0, CountStmt=4, CountStmtDecl=4, CountStmtExe=3, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 5,608
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.Question

human_written_code:
from typing import Callable, Optional, Union
from dataclasses import dataclass
@dataclass
class Question:
original_text: str
text: str
numeric_spans: Optional[list[NumericValueSpan]] = None
class_skeleton:
@dataclass
class Question:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.5, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=4, CountLineBlank=0, CountLineCode=4, CountLineCodeDecl=2, CountLineCodeExe=3, CountLineComment=2, CountStmt=4, CountStmtDecl=2, CountStmtExe=3, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0
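Rows 5,604 through 5,608 record the small dataclasses that TAPAS tokenization uses to represent cells, parsed numbers and dates. As a hedged illustration of how they nest (assuming a transformers version that ships these classes), one might build:

```python
from transformers.models.tapas.tokenization_tapas import (
    Date,
    NumericValue,
    NumericValueSpan,
    Question,
)

# A question whose tokens 0..2 were parsed as the number 87.
span = NumericValueSpan(begin_index=0, end_index=2, values=[NumericValue(float_value=87.0)])
question = Question(original_text="How many movies?", text="how many movies", numeric_spans=[span])

# Dates reuse the same NumericValue wrapper.
released = NumericValue(date=Date(year=2019, month=11))
```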

id: 5,609
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.Relation

human_written_code:
import enum
class Relation(enum.Enum):
HEADER_TO_CELL = 1
CELL_TO_HEADER = 2
QUERY_TO_HEADER = 3
QUERY_TO_CELL = 4
ROW_TO_CELL = 5
CELL_TO_ROW = 6
EQ = 7
LT = 8
GT = 9
class_skeleton:
class Relation(enum.Enum):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.9, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=49, CountLine=10, CountLineBlank=0, CountLineCode=10, CountLineCodeDecl=10, CountLineCodeExe=9, CountLineComment=9, CountStmt=10, CountStmtDecl=10, CountStmtExe=9, MaxCyclomatic=0, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=0

id: 5,610
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.SerializedExample

human_written_code:
from dataclasses import dataclass
@dataclass(frozen=True)
class SerializedExample:
tokens: list[str]
column_ids: list[int]
row_ids: list[int]
segment_ids: list[int]
class_skeleton:
@dataclass(frozen=True)
class SerializedExample:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=5, CountLineBlank=0, CountLineCode=5, CountLineCodeDecl=1, CountLineCodeExe=4, CountLineComment=0, CountStmt=5, CountStmtDecl=1, CountStmtExe=4, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 5,611
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
class_name: transformers.models.tapas.tokenization_tapas.TapasTokenizer

human_written_code:
from ...utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available, logging
from collections.abc import Generator
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
import numpy as np
import collections
import os
from ...tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, VERY_LARGE_INTEGER, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput
from typing import Callable, Optional, Union
class TapasTokenizer(PreTrainedTokenizer):
"""
Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by
TAPAS models.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods. [`TapasTokenizer`] creates several token type ids to
encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `segment_ids`,
`column_ids`, `row_ids`, `prev_labels`, `column_ranks`, `inv_column_ranks` and `numeric_relations`:
- segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and
padding.
- column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question
tokens, special tokens and padding.
- row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens,
special tokens and padding. Tokens of column headers are also 0.
- prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in
a conversational setup (such as SQA).
- column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a
column "number of movies" with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2
respectively. 0 for all question tokens, special tokens and padding.
- inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if
you have a column "number of movies" with values 87, 53 and 69, then the inverse column ranks of these tokens are
1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding.
- numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all
question tokens, special tokens and padding.
[`TapasTokenizer`] runs end-to-end tokenization on a table and associated sentences: punctuation splitting and
wordpiece.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
empty_token (`str`, *optional*, defaults to `"[EMPTY]"`):
The token used for empty cell values in a table. Empty cell values include "", "n/a", "nan" and "?".
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
cell_trim_length (`int`, *optional*, defaults to -1):
If > 0: Trim cells so that the length is <= this value. Also disables further cell trimming, should thus be
used with `truncation` set to `True`.
max_column_id (`int`, *optional*):
Max column id to extract.
max_row_id (`int`, *optional*):
Max row id to extract.
strip_column_names (`bool`, *optional*, defaults to `False`):
Whether to add empty strings instead of column names.
update_answer_coordinates (`bool`, *optional*, defaults to `False`):
Whether to recompute the answer coordinates from the answer text.
min_question_length (`int`, *optional*):
Minimum length of each question in terms of tokens (will be skipped otherwise).
max_question_length (`int`, *optional*):
Maximum length of each question in terms of tokens (will be skipped otherwise).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
extra spaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', empty_token='[EMPTY]', tokenize_chinese_chars=True, strip_accents=None, cell_trim_length: int=-1, max_column_id: Optional[int]=None, max_row_id: Optional[int]=None, strip_column_names: bool=False, update_answer_coordinates: bool=False, min_question_length=None, max_question_length=None, model_max_length: int=512, additional_special_tokens: Optional[list[str]]=None, clean_up_tokenization_spaces=True, **kwargs):
if not is_pandas_available():
raise ImportError('Pandas is required for the TAPAS tokenizer.')
if additional_special_tokens is not None:
if empty_token not in additional_special_tokens:
additional_special_tokens.append(empty_token)
else:
additional_special_tokens = [empty_token]
if not os.path.isfile(vocab_file):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
self.cell_trim_length = cell_trim_length
self.max_column_id = max_column_id if max_column_id is not None else model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
self.max_row_id = max_row_id if max_row_id is not None else model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
self.strip_column_names = strip_column_names
self.update_answer_coordinates = update_answer_coordinates
self.min_question_length = min_question_length
self.max_question_length = max_question_length
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, empty_token=empty_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, cell_trim_length=cell_trim_length, max_column_id=max_column_id, max_row_id=max_row_id, strip_column_names=strip_column_names, update_answer_coordinates=update_answer_coordinates, min_question_length=min_question_length, max_question_length=max_question_length, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
if format_text(text) == EMPTY_TEXT:
return [self.additional_special_tokens[0]]
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(token + '\n')
index += 1
return (vocab_file,)
def create_attention_mask_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
"""
Creates the attention mask according to the query token IDs and a list of table values.
Args:
query_ids (`list[int]`): list of token IDs corresponding to the ID.
table_values (`list[TableValue]`): lift of table values, which are named tuples containing the
token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the attention mask values.
"""
return [1] * (1 + len(query_ids) + 1 + len(table_values))
def create_segment_token_type_ids_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
"""
Creates the segment token type IDs according to the query token IDs and a list of table values.
Args:
query_ids (`list[int]`): list of token IDs corresponding to the ID.
table_values (`list[TableValue]`): lift of table values, which are named tuples containing the
token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the segment token type IDs values.
"""
table_ids = list(zip(*table_values))[0] if table_values else []
return [0] * (1 + len(query_ids) + 1) + [1] * len(table_ids)
def create_column_token_type_ids_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
"""
Creates the column token type IDs according to the query token IDs and a list of table values.
Args:
query_ids (`list[int]`): list of token IDs corresponding to the ID.
table_values (`list[TableValue]`): lift of table values, which are named tuples containing the
token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the column token type IDs values.
"""
table_column_ids = list(zip(*table_values))[1] if table_values else []
return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)
def create_row_token_type_ids_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
"""
Creates the row token type IDs according to the query token IDs and a list of table values.
Args:
query_ids (`list[int]`): list of token IDs corresponding to the ID.
table_values (`list[TableValue]`): lift of table values, which are named tuples containing the
token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the row token type IDs values.
"""
table_row_ids = list(zip(*table_values))[2] if table_values else []
return [0] * (1 + len(query_ids) + 1) + list(table_row_ids)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a question and flattened table for question answering or sequence classification tasks
by concatenating and adding special tokens.
Args:
token_ids_0 (`list[int]`): The ids of the question.
token_ids_1 (`list[int]`, *optional*): The ids of the flattened table.
Returns:
`list[int]`: The model input with special tokens.
"""
if token_ids_1 is None:
raise ValueError('With TAPAS, you must provide both question IDs and table IDs.')
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of question IDs.
token_ids_1 (`list[int]`, *optional*):
List of flattened table IDs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)
return [1] + [0] * len(token_ids_0) + [1]
@add_end_docstrings(TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, table: 'pd.DataFrame', queries: Optional[Union[TextInput, PreTokenizedInput, EncodedInput, list[TextInput], list[PreTokenizedInput], list[EncodedInput]]]=None, answer_coordinates: Optional[Union[list[tuple], list[list[tuple]]]]=None, answer_text: Optional[Union[list[TextInput], list[list[TextInput]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) related to a table.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
queries (`str` or `list[str]`):
Question or batch of questions related to a table to be encoded. Note that in case of a batch, all
questions must refer to the **same** table.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. In case only a single table-question pair
is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must
be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The
first column has index 0. In case a batch of table-question pairs is provided, then the
answer_coordinates must be a list of lists of tuples (each list corresponding to a single
table-question pair).
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. In case only a single table-question pair is
provided, then the answer_text must be a single list of one or more strings. Each string must be the
answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided,
then the answer_coordinates must be a list of lists of strings (each list corresponding to a single
table-question pair).
"""
assert isinstance(table, pd.DataFrame), 'Table must be of type pd.DataFrame'
valid_query = False
if queries is None or isinstance(queries, str):
valid_query = True
elif isinstance(queries, (list, tuple)):
if len(queries) == 0 or isinstance(queries[0], str):
valid_query = True
if not valid_query:
raise ValueError('queries input must of type `str` (single example), `list[str]` (batch or single pretokenized example). ')
is_batched = isinstance(queries, (list, tuple))
if is_batched:
return self.batch_encode_plus(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(table=table, query=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, table: 'pd.DataFrame', queries: Optional[Union[list[TextInput], list[PreTokenizedInput], list[EncodedInput]]]=None, answer_coordinates: Optional[list[list[tuple]]]=None, answer_text: Optional[list[list[TextInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Prepare a table and a list of strings for the model.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
queries (`list[str]`):
Batch of questions related to a table to be encoded. Note that all questions must refer to the **same**
table.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. Each tuple must be a (row_index,
column_index) pair. The first data row (not the column header row) has index 0. The first column has
index 0. The answer_coordinates must be a list of lists of tuples (each list corresponding to a single
table-question pair).
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. In case a batch of table-question pairs is
provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a
single table-question pair). Each string must be the answer text of a corresponding answer coordinate.
"""
if return_token_type_ids is not None and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if answer_coordinates and (not answer_text) or (not answer_coordinates and answer_text):
raise ValueError('In case you provide answers, both answer_coordinates and answer_text should be provided')
elif answer_coordinates is None and answer_text is None:
answer_coordinates = answer_text = [None] * len(queries)
if 'is_split_into_words' in kwargs:
raise NotImplementedError('Currently TapasTokenizer only supports questions as strings.')
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
return self._batch_encode_plus(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _get_question_tokens(self, query):
"""Tokenizes the query, taking into account the max and min question length."""
query_tokens = self.tokenize(query)
if self.max_question_length is not None and len(query_tokens) > self.max_question_length:
logger.warning('Skipping query as its tokens are longer than the max question length')
return ('', [])
if self.min_question_length is not None and len(query_tokens) < self.min_question_length:
logger.warning('Skipping query as its tokens are shorter than the min question length')
return ('', [])
return (query, query_tokens)
def _batch_encode_plus(self, table, queries: Union[list[TextInput], list[PreTokenizedInput], list[EncodedInput]], answer_coordinates: Optional[list[list[tuple]]]=None, answer_text: Optional[list[list[TextInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
table_tokens = self._tokenize_table(table)
queries_tokens = []
for idx, query in enumerate(queries):
query, query_tokens = self._get_question_tokens(query)
queries[idx] = query
queries_tokens.append(query_tokens)
batch_outputs = self._batch_prepare_for_model(table, queries, tokenized_table=table_tokens, queries_tokens=queries_tokens, answer_coordinates=answer_coordinates, padding=padding, truncation=truncation, answer_text=answer_text, add_special_tokens=add_special_tokens, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
return BatchEncoding(batch_outputs)
def _batch_prepare_for_model(self, raw_table: 'pd.DataFrame', raw_queries: Union[list[TextInput], list[PreTokenizedInput], list[EncodedInput]], tokenized_table: Optional[TokenizedTable]=None, queries_tokens: Optional[list[list[str]]]=None, answer_coordinates: Optional[list[list[tuple]]]=None, answer_text: Optional[list[list[TextInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=True, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
batch_outputs = {}
for index, example in enumerate(zip(raw_queries, queries_tokens, answer_coordinates, answer_text)):
raw_query, query_tokens, answer_coords, answer_txt = example
outputs = self.prepare_for_model(raw_table, raw_query, tokenized_table=tokenized_table, query_tokens=query_tokens, answer_coordinates=answer_coords, answer_text=answer_txt, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation, max_length=max_length, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose, prev_answer_coordinates=answer_coordinates[index - 1] if index != 0 else None, prev_answer_text=answer_text[index - 1] if index != 0 else None)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
def encode(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]:
"""
Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc.
which are necessary for the model to work correctly. Use that method if you want to build your processing on
your own, otherwise refer to `__call__`.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
query (`str` or `list[str]`):
Question related to a table to be encoded.
"""
encoded_inputs = self.encode_plus(table, query=query, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs)
return encoded_inputs['input_ids']
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, answer_coordinates: Optional[list[tuple]]=None, answer_text: Optional[list[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Prepare a table and a string for the model.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
query (`str` or `list[str]`):
Question related to a table to be encoded.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
(not the column header row) has index 0. The first column has index 0.
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
more strings. Each string must be the answer text of a corresponding answer coordinate.
"""
if return_token_type_ids is not None and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if answer_coordinates and (not answer_text) or (not answer_coordinates and answer_text):
raise ValueError('In case you provide answers, both answer_coordinates and answer_text should be provided')
if 'is_split_into_words' in kwargs:
raise NotImplementedError('Currently TapasTokenizer only supports questions as strings.')
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
return self._encode_plus(table=table, query=query, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _encode_plus(self, table: 'pd.DataFrame', query: Union[TextInput, PreTokenizedInput, EncodedInput], answer_coordinates: Optional[list[tuple]]=None, answer_text: Optional[list[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=True, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs):
if query is None:
query = ''
logger.warning('TAPAS is a question answering model but you have not passed a query. Please be aware that the model will probably not behave correctly.')
table_tokens = self._tokenize_table(table)
query, query_tokens = self._get_question_tokens(query)
return self.prepare_for_model(table, query, tokenized_table=table_tokens, query_tokens=query_tokens, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, raw_table: 'pd.DataFrame', raw_query: Union[TextInput, PreTokenizedInput, EncodedInput], tokenized_table: Optional[TokenizedTable]=None, query_tokens: Optional[TokenizedTable]=None, answer_coordinates: Optional[list[tuple]]=None, answer_text: Optional[list[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=True, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
"""
Prepares a sequence of input id so that it can be used by the model. It adds special tokens, truncates
sequences if overflowing while taking into account the special tokens.
Args:
raw_table (`pd.DataFrame`):
The original table before any transformation (like tokenization) was applied to it.
raw_query (`TextInput` or `PreTokenizedInput` or `EncodedInput`):
The original query before any transformation (like tokenization) was applied to it.
tokenized_table (`TokenizedTable`):
The table after tokenization.
query_tokens (`list[str]`):
The query after tokenization.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
(not the column header row) has index 0. The first column has index 0.
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
more strings. Each string must be the answer text of a corresponding answer coordinate.
"""
if isinstance(padding, bool):
if padding and (max_length is not None or pad_to_multiple_of is not None):
padding = PaddingStrategy.MAX_LENGTH
else:
padding = PaddingStrategy.DO_NOT_PAD
elif not isinstance(padding, PaddingStrategy):
padding = PaddingStrategy(padding)
if isinstance(truncation, bool):
if truncation:
truncation = TapasTruncationStrategy.DROP_ROWS_TO_FIT
else:
truncation = TapasTruncationStrategy.DO_NOT_TRUNCATE
elif not isinstance(truncation, TapasTruncationStrategy):
truncation = TapasTruncationStrategy(truncation)
encoded_inputs = {}
is_part_of_batch = False
prev_answer_coordinates, prev_answer_text = (None, None)
if 'prev_answer_coordinates' in kwargs and 'prev_answer_text' in kwargs:
is_part_of_batch = True
prev_answer_coordinates = kwargs['prev_answer_coordinates']
prev_answer_text = kwargs['prev_answer_text']
num_rows = self._get_num_rows(raw_table, truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE)
num_columns = self._get_num_columns(raw_table)
_, _, num_tokens = self._get_table_boundaries(tokenized_table)
if truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE:
num_rows, num_tokens = self._get_truncated_table_rows(query_tokens, tokenized_table, num_rows, num_columns, max_length, truncation_strategy=truncation)
table_data = list(self._get_table_values(tokenized_table, num_columns, num_rows, num_tokens))
query_ids = self.convert_tokens_to_ids(query_tokens)
table_ids = list(zip(*table_data))[0] if len(table_data) > 0 else list(zip(*table_data))
table_ids = self.convert_tokens_to_ids(list(table_ids))
if 'return_overflowing_tokens' in kwargs and kwargs['return_overflowing_tokens']:
raise ValueError('TAPAS does not return overflowing tokens as it works on tables.')
if add_special_tokens:
input_ids = self.build_inputs_with_special_tokens(query_ids, table_ids)
else:
input_ids = query_ids + table_ids
if max_length is not None and len(input_ids) > max_length:
raise ValueError(f'Could not encode the query and table header given the maximum length. Encoding the query and table header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}')
encoded_inputs['input_ids'] = input_ids
segment_ids = self.create_segment_token_type_ids_from_sequences(query_ids, table_data)
column_ids = self.create_column_token_type_ids_from_sequences(query_ids, table_data)
row_ids = self.create_row_token_type_ids_from_sequences(query_ids, table_data)
if not is_part_of_batch or (prev_answer_coordinates is None and prev_answer_text is None):
prev_labels = [0] * len(row_ids)
else:
prev_labels = self.get_answer_ids(column_ids, row_ids, table_data, prev_answer_text, prev_answer_coordinates)
raw_table = add_numeric_table_values(raw_table)
raw_query = add_numeric_values_to_question(raw_query)
column_ranks, inv_column_ranks = self._get_numeric_column_ranks(column_ids, row_ids, raw_table)
numeric_relations = self._get_numeric_relations(raw_query, column_ids, row_ids, raw_table)
if return_token_type_ids is None:
return_token_type_ids = 'token_type_ids' in self.model_input_names
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if return_attention_mask:
attention_mask = self.create_attention_mask_from_sequences(query_ids, table_data)
encoded_inputs['attention_mask'] = attention_mask
if answer_coordinates is not None and answer_text is not None:
labels = self.get_answer_ids(column_ids, row_ids, table_data, answer_text, answer_coordinates)
numeric_values = self._get_numeric_values(raw_table, column_ids, row_ids)
numeric_values_scale = self._get_numeric_values_scale(raw_table, column_ids, row_ids)
encoded_inputs['labels'] = labels
encoded_inputs['numeric_values'] = numeric_values
encoded_inputs['numeric_values_scale'] = numeric_values_scale
if return_token_type_ids:
token_type_ids = [segment_ids, column_ids, row_ids, prev_labels, column_ranks, inv_column_ranks, numeric_relations]
token_type_ids = [list(ids) for ids in list(zip(*token_type_ids))]
encoded_inputs['token_type_ids'] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(query_ids, table_ids)
else:
encoded_inputs['special_tokens_mask'] = [0] * len(input_ids)
if max_length is None and len(encoded_inputs['input_ids']) > self.model_max_length and verbose:
if not self.deprecation_warnings.get('sequence-length-is-longer-than-the-specified-maximum', False):
logger.warning(f"Token indices sequence length is longer than the specified maximum sequence length for this model ({len(encoded_inputs['input_ids'])} > {self.model_max_length}). Running this sequence through the model will result in indexing errors.")
self.deprecation_warnings['sequence-length-is-longer-than-the-specified-maximum'] = True
if padding != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_length:
encoded_inputs['length'] = len(encoded_inputs['input_ids'])
batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
return batch_outputs
def _get_truncated_table_rows(self, query_tokens: list[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy]) -> tuple[int, int]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
query_tokens (`list[str]`):
List of strings corresponding to the tokenized query.
tokenized_table (`TokenizedTable`):
Tokenized table
num_rows (`int`):
Total number of table rows
num_columns (`int`):
Total number of table columns
max_length (`int`):
Total maximum length.
            truncation_strategy (`str` or [`TapasTruncationStrategy`]):
Truncation strategy to use. Seeing as this method should only be called when truncating, the only
available strategy is the `"drop_rows_to_fit"` strategy.
Returns:
`Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available
for each table element.
"""
if not isinstance(truncation_strategy, TapasTruncationStrategy):
truncation_strategy = TapasTruncationStrategy(truncation_strategy)
if max_length is None:
max_length = self.model_max_length
if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT:
while True:
num_tokens = self._get_max_num_tokens(query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length)
if num_tokens is not None:
break
num_rows -= 1
if num_rows < 1:
break
elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE:
raise ValueError(f'Unknown truncation strategy {truncation_strategy}.')
return (num_rows, num_tokens or 1)
def _tokenize_table(self, table=None):
"""
Tokenizes column headers and cell texts of a table.
Args:
            table (`pd.Dataframe`):
                Table.
        Returns:
            `TokenizedTable`: TokenizedTable object.
"""
tokenized_rows = []
tokenized_row = []
for column in table:
if self.strip_column_names:
tokenized_row.append(self.tokenize(''))
else:
tokenized_row.append(self.tokenize(column))
tokenized_rows.append(tokenized_row)
for idx, row in table.iterrows():
tokenized_row = []
for cell in row:
tokenized_row.append(self.tokenize(cell))
tokenized_rows.append(tokenized_row)
token_coordinates = []
for row_index, row in enumerate(tokenized_rows):
for column_index, cell in enumerate(row):
for token_index, _ in enumerate(cell):
token_coordinates.append(TokenCoordinates(row_index=row_index, column_index=column_index, token_index=token_index))
return TokenizedTable(rows=tokenized_rows, selected_tokens=token_coordinates)
def _question_encoding_cost(self, question_tokens):
return len(question_tokens) + 2
def _get_token_budget(self, question_tokens, max_length=None):
"""
Computes the number of tokens left for the table after tokenizing a question, taking into account the max
sequence length of the model.
Args:
            question_tokens (`list[str]`):
                List of question tokens.
        Returns:
            `int`: the number of tokens left for the table, given the model max length.
"""
return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost(question_tokens)
def _get_table_values(self, table, num_columns, num_rows, num_tokens) -> Generator[TableValue, None, None]:
"""Iterates over partial table and returns token, column and row indexes."""
for tc in table.selected_tokens:
if tc.row_index >= num_rows + 1:
continue
if tc.column_index >= num_columns:
continue
cell = table.rows[tc.row_index][tc.column_index]
token = cell[tc.token_index]
word_begin_index = tc.token_index
while word_begin_index >= 0 and _is_inner_wordpiece(cell[word_begin_index]):
word_begin_index -= 1
if word_begin_index >= num_tokens:
continue
yield TableValue(token, tc.column_index + 1, tc.row_index)
def _get_table_boundaries(self, table):
"""Return maximal number of rows, columns and tokens."""
max_num_tokens = 0
max_num_columns = 0
max_num_rows = 0
for tc in table.selected_tokens:
max_num_columns = max(max_num_columns, tc.column_index + 1)
max_num_rows = max(max_num_rows, tc.row_index + 1)
max_num_tokens = max(max_num_tokens, tc.token_index + 1)
max_num_columns = min(self.max_column_id, max_num_columns)
max_num_rows = min(self.max_row_id, max_num_rows)
return (max_num_rows, max_num_columns, max_num_tokens)
def _get_table_cost(self, table, num_columns, num_rows, num_tokens):
return sum((1 for _ in self._get_table_values(table, num_columns, num_rows, num_tokens)))
def _get_max_num_tokens(self, question_tokens, tokenized_table, num_columns, num_rows, max_length):
"""Computes max number of tokens that can be squeezed into the budget."""
token_budget = self._get_token_budget(question_tokens, max_length)
_, _, max_num_tokens = self._get_table_boundaries(tokenized_table)
if self.cell_trim_length >= 0 and max_num_tokens > self.cell_trim_length:
max_num_tokens = self.cell_trim_length
num_tokens = 0
for num_tokens in range(max_num_tokens + 1):
cost = self._get_table_cost(tokenized_table, num_columns, num_rows, num_tokens + 1)
if cost > token_budget:
break
if num_tokens < max_num_tokens:
if self.cell_trim_length >= 0:
return None
if num_tokens == 0:
return None
return num_tokens
def _get_num_columns(self, table):
num_columns = table.shape[1]
if num_columns >= self.max_column_id:
raise ValueError('Too many columns')
return num_columns
def _get_num_rows(self, table, drop_rows_to_fit):
num_rows = table.shape[0]
if num_rows >= self.max_row_id:
if drop_rows_to_fit:
num_rows = self.max_row_id - 1
else:
raise ValueError('Too many rows')
return num_rows
def _serialize_text(self, question_tokens):
"""Serializes texts in index arrays."""
tokens = []
segment_ids = []
column_ids = []
row_ids = []
tokens.append(self.cls_token)
segment_ids.append(0)
column_ids.append(0)
row_ids.append(0)
for token in question_tokens:
tokens.append(token)
segment_ids.append(0)
column_ids.append(0)
row_ids.append(0)
return (tokens, segment_ids, column_ids, row_ids)
def _serialize(self, question_tokens, table, num_columns, num_rows, num_tokens):
"""Serializes table and text."""
tokens, segment_ids, column_ids, row_ids = self._serialize_text(question_tokens)
tokens.append(self.sep_token)
segment_ids.append(0)
column_ids.append(0)
row_ids.append(0)
for token, column_id, row_id in self._get_table_values(table, num_columns, num_rows, num_tokens):
tokens.append(token)
segment_ids.append(1)
column_ids.append(column_id)
row_ids.append(row_id)
return SerializedExample(tokens=tokens, segment_ids=segment_ids, column_ids=column_ids, row_ids=row_ids)
def _get_column_values(self, table, col_index):
table_numeric_values = {}
for row_index, row in table.iterrows():
cell = row[col_index]
if cell.numeric_value is not None:
table_numeric_values[row_index] = cell.numeric_value
return table_numeric_values
def _get_cell_token_indexes(self, column_ids, row_ids, column_id, row_id):
for index in range(len(column_ids)):
if column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id:
yield index
def _get_numeric_column_ranks(self, column_ids, row_ids, table):
"""Returns column ranks for all numeric columns."""
ranks = [0] * len(column_ids)
inv_ranks = [0] * len(column_ids)
if table is not None:
for col_index in range(len(table.columns)):
table_numeric_values = self._get_column_values(table, col_index)
if not table_numeric_values:
continue
try:
key_fn = get_numeric_sort_key_fn(table_numeric_values.values())
except ValueError:
continue
table_numeric_values = {row_index: key_fn(value) for row_index, value in table_numeric_values.items()}
table_numeric_values_inv = collections.defaultdict(list)
for row_index, value in table_numeric_values.items():
table_numeric_values_inv[value].append(row_index)
unique_values = sorted(table_numeric_values_inv.keys())
for rank, value in enumerate(unique_values):
for row_index in table_numeric_values_inv[value]:
for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index):
ranks[index] = rank + 1
inv_ranks[index] = len(unique_values) - rank
return (ranks, inv_ranks)
def _get_numeric_sort_key_fn(self, table_numeric_values, value):
"""
Returns the sort key function for comparing value to table values. The function returned will be a suitable
input for the key param of the sort(). See number_annotation_utils._get_numeric_sort_key_fn for details
Args:
table_numeric_values: Numeric values of a column
value: Numeric value in the question
Returns:
A function key function to compare column and question values.
"""
if not table_numeric_values:
return None
all_values = list(table_numeric_values.values())
all_values.append(value)
try:
return get_numeric_sort_key_fn(all_values)
except ValueError:
return None
def _get_numeric_relations(self, question, column_ids, row_ids, table):
"""
Returns numeric relations embeddings
Args:
question: Question object.
column_ids: Maps word piece position to column id.
row_ids: Maps word piece position to row id.
table: The table containing the numeric cell values.
"""
numeric_relations = [0] * len(column_ids)
cell_indices_to_relations = collections.defaultdict(set)
if question is not None and table is not None:
for numeric_value_span in question.numeric_spans:
for value in numeric_value_span.values:
for column_index in range(len(table.columns)):
table_numeric_values = self._get_column_values(table, column_index)
sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values, value)
if sort_key_fn is None:
continue
for row_index, cell_value in table_numeric_values.items():
relation = get_numeric_relation(value, cell_value, sort_key_fn)
if relation is not None:
cell_indices_to_relations[column_index, row_index].add(relation)
for (column_index, row_index), relations in cell_indices_to_relations.items():
relation_set_index = 0
for relation in relations:
assert relation.value >= Relation.EQ.value
relation_set_index += 2 ** (relation.value - Relation.EQ.value)
for cell_token_index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index):
numeric_relations[cell_token_index] = relation_set_index
return numeric_relations
def _get_numeric_values(self, table, column_ids, row_ids):
"""Returns numeric values for computation of answer loss."""
numeric_values = [float('nan')] * len(column_ids)
if table is not None:
num_rows = table.shape[0]
num_columns = table.shape[1]
for col_index in range(num_columns):
for row_index in range(num_rows):
numeric_value = table.iloc[row_index, col_index].numeric_value
if numeric_value is not None:
if numeric_value.float_value is None:
continue
float_value = numeric_value.float_value
if float_value == float('inf'):
continue
for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index):
numeric_values[index] = float_value
return numeric_values
def _get_numeric_values_scale(self, table, column_ids, row_ids):
"""Returns a scale to each token to down weigh the value of long words."""
numeric_values_scale = [1.0] * len(column_ids)
if table is None:
return numeric_values_scale
num_rows = table.shape[0]
num_columns = table.shape[1]
for col_index in range(num_columns):
for row_index in range(num_rows):
indices = list(self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index))
num_indices = len(indices)
if num_indices > 1:
for index in indices:
numeric_values_scale[index] = float(num_indices)
return numeric_values_scale
def _pad_to_seq_length(self, inputs):
while len(inputs) > self.model_max_length:
inputs.pop()
while len(inputs) < self.model_max_length:
inputs.append(0)
def _get_all_answer_ids_from_coordinates(self, column_ids, row_ids, answers_list):
"""Maps lists of answer coordinates to token indexes."""
answer_ids = [0] * len(column_ids)
found_answers = set()
all_answers = set()
for answers in answers_list:
column_index, row_index = answers
all_answers.add((column_index, row_index))
for index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index):
found_answers.add((column_index, row_index))
answer_ids[index] = 1
missing_count = len(all_answers) - len(found_answers)
return (answer_ids, missing_count)
def _get_all_answer_ids(self, column_ids, row_ids, answer_coordinates):
"""
Maps answer coordinates of a question to token indexes.
In the SQA format (TSV), the coordinates are given as (row, column) tuples. Here, we first swap them to
(column, row) format before calling _get_all_answer_ids_from_coordinates.
"""
def _to_coordinates(answer_coordinates_question):
return [(coords[1], coords[0]) for coords in answer_coordinates_question]
return self._get_all_answer_ids_from_coordinates(column_ids, row_ids, answers_list=_to_coordinates(answer_coordinates))
def _find_tokens(self, text, segment):
"""Return start index of segment in text or None."""
logging.info(f'text: {text} {segment}')
for index in range(1 + len(text) - len(segment)):
for seg_index, seg_token in enumerate(segment):
if text[index + seg_index].piece != seg_token.piece:
break
else:
return index
return None
def _find_answer_coordinates_from_answer_text(self, tokenized_table, answer_text):
"""Returns all occurrences of answer_text in the table."""
logging.info(f'answer text: {answer_text}')
for row_index, row in enumerate(tokenized_table.rows):
if row_index == 0:
continue
for col_index, cell in enumerate(row):
token_index = self._find_tokens(cell, answer_text)
if token_index is not None:
yield TokenCoordinates(row_index=row_index, column_index=col_index, token_index=token_index)
def _find_answer_ids_from_answer_texts(self, column_ids, row_ids, tokenized_table, answer_texts):
"""Maps question with answer texts to the first matching token indexes."""
answer_ids = [0] * len(column_ids)
for answer_text in answer_texts:
for coordinates in self._find_answer_coordinates_from_answer_text(tokenized_table, answer_text):
indexes = list(self._get_cell_token_indexes(column_ids, row_ids, column_id=coordinates.column_index, row_id=coordinates.row_index - 1))
indexes.sort()
coordinate_answer_ids = []
if indexes:
begin_index = coordinates.token_index + indexes[0]
end_index = begin_index + len(answer_text)
for index in indexes:
if index >= begin_index and index < end_index:
coordinate_answer_ids.append(index)
if len(coordinate_answer_ids) == len(answer_text):
for index in coordinate_answer_ids:
answer_ids[index] = 1
break
return answer_ids
def _get_answer_ids(self, column_ids, row_ids, answer_coordinates):
"""Maps answer coordinates of a question to token indexes."""
answer_ids, missing_count = self._get_all_answer_ids(column_ids, row_ids, answer_coordinates)
if missing_count:
raise ValueError("Couldn't find all answers")
return answer_ids
def get_answer_ids(self, column_ids, row_ids, tokenized_table, answer_texts_question, answer_coordinates_question):
if self.update_answer_coordinates:
return self._find_answer_ids_from_answer_texts(column_ids, row_ids, tokenized_table, answer_texts=[self.tokenize(at) for at in answer_texts_question])
return self._get_answer_ids(column_ids, row_ids, answer_coordinates_question)
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs['input_ids'])
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs['input_ids']) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(encoded_inputs['input_ids'])
if needs_to_be_padded:
difference = max_length - len(encoded_inputs['input_ids'])
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [[self.pad_token_type_id] * 7] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [0] * difference
if 'numeric_values' in encoded_inputs:
encoded_inputs['numeric_values'] = encoded_inputs['numeric_values'] + [float('nan')] * difference
if 'numeric_values_scale' in encoded_inputs:
encoded_inputs['numeric_values_scale'] = encoded_inputs['numeric_values_scale'] + [1.0] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs['input_ids'] = encoded_inputs['input_ids'] + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [[self.pad_token_type_id] * 7] * difference + encoded_inputs['token_type_ids']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [0] * difference + encoded_inputs['labels']
if 'numeric_values' in encoded_inputs:
encoded_inputs['numeric_values'] = [float('nan')] * difference + encoded_inputs['numeric_values']
if 'numeric_values_scale' in encoded_inputs:
encoded_inputs['numeric_values_scale'] = [1.0] * difference + encoded_inputs['numeric_values_scale']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs['input_ids'] = [self.pad_token_id] * difference + encoded_inputs['input_ids']
else:
raise ValueError('Invalid padding strategy:' + str(padding_side))
return encoded_inputs
def _get_cell_token_probs(self, probabilities, segment_ids, row_ids, column_ids):
for i, p in enumerate(probabilities):
segment_id = segment_ids[i]
col = column_ids[i] - 1
row = row_ids[i] - 1
if col >= 0 and row >= 0 and (segment_id == 1):
yield (i, p)
def _get_mean_cell_probs(self, probabilities, segment_ids, row_ids, column_ids):
"""Computes average probability per cell, aggregating over tokens."""
coords_to_probs = collections.defaultdict(list)
for i, prob in self._get_cell_token_probs(probabilities, segment_ids, row_ids, column_ids):
col = column_ids[i] - 1
row = row_ids[i] - 1
coords_to_probs[col, row].append(prob)
return {coords: np.array(cell_probs).mean() for coords, cell_probs in coords_to_probs.items()}
def convert_logits_to_predictions(self, data, logits, logits_agg=None, cell_classification_threshold=0.5):
"""
Converts logits of [`TapasForQuestionAnswering`] to actual predicted answer coordinates and optional
aggregation indices.
The original implementation, on which this function is based, can be found
[here](https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288).
Args:
data (`dict`):
Dictionary mapping features to actual values. Should be created using [`TapasTokenizer`].
logits (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Tensor containing the logits at the token level.
logits_agg (`torch.Tensor` of shape `(batch_size, num_aggregation_labels)`, *optional*):
Tensor containing the aggregation logits.
cell_classification_threshold (`float`, *optional*, defaults to 0.5):
Threshold to be used for cell selection. All table cells for which their probability is larger than
this threshold will be selected.
Returns:
`tuple` comprising various elements depending on the inputs:
- predicted_answer_coordinates (`list[list[[tuple]]` of length `batch_size`): Predicted answer coordinates
as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a
single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index).
- predicted_aggregation_indices (`list[int]`of length `batch_size`, *optional*, returned when
`logits_aggregation` is provided): Predicted aggregation operator indices of the aggregation head.
"""
logits = logits.numpy()
if logits_agg is not None:
logits_agg = logits_agg.numpy()
data = {key: value.numpy() for key, value in data.items() if key != 'training'}
logits[logits < -88.7] = -88.7
probabilities = 1 / (1 + np.exp(-logits)) * data['attention_mask']
token_types = ['segment_ids', 'column_ids', 'row_ids', 'prev_labels', 'column_ranks', 'inv_column_ranks', 'numeric_relations']
input_ids = data['input_ids']
segment_ids = data['token_type_ids'][:, :, token_types.index('segment_ids')]
row_ids = data['token_type_ids'][:, :, token_types.index('row_ids')]
column_ids = data['token_type_ids'][:, :, token_types.index('column_ids')]
num_batch = input_ids.shape[0]
predicted_answer_coordinates = []
for i in range(num_batch):
probabilities_example = probabilities[i].tolist()
segment_ids_example = segment_ids[i]
row_ids_example = row_ids[i]
column_ids_example = column_ids[i]
max_width = column_ids_example.max()
max_height = row_ids_example.max()
if max_width == 0 and max_height == 0:
continue
cell_coords_to_prob = self._get_mean_cell_probs(probabilities_example, segment_ids_example.tolist(), row_ids_example.tolist(), column_ids_example.tolist())
answer_coordinates = []
for col in range(max_width):
for row in range(max_height):
cell_prob = cell_coords_to_prob.get((col, row), None)
if cell_prob is not None:
if cell_prob > cell_classification_threshold:
answer_coordinates.append((row, col))
answer_coordinates = sorted(answer_coordinates)
predicted_answer_coordinates.append(answer_coordinates)
output = (predicted_answer_coordinates,)
if logits_agg is not None:
predicted_aggregation_indices = logits_agg.argmax(axis=-1)
output = (predicted_answer_coordinates, predicted_aggregation_indices.tolist())
return output
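A minimal end-to-end usage sketch (not part of the original module; the checkpoint name and the example table are illustrative assumptions) showing how `__call__` and `convert_logits_to_predictions` are typically combined:
import pandas as pd
from transformers import TapasForQuestionAnswering, TapasTokenizer

# Illustrative checkpoint; any TAPAS question-answering checkpoint with an aggregation head behaves similarly.
model_name = "google/tapas-base-finetuned-wtq"
tokenizer = TapasTokenizer.from_pretrained(model_name)
model = TapasForQuestionAnswering.from_pretrained(model_name)

data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]}
table = pd.DataFrame.from_dict(data)  # all cell values must already be strings
queries = ["How many movies has George Clooney played in?"]

inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
# inputs["token_type_ids"] holds the 7 ids per token described in the class docstring:
# [segment_id, column_id, row_id, prev_label, column_rank, inv_column_rank, numeric_relation]
outputs = model(**inputs)
predicted_coordinates, predicted_aggregation = tokenizer.convert_logits_to_predictions(
    inputs, outputs.logits.detach(), outputs.logits_aggregation.detach()
)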
|
class TapasTokenizer(PreTrainedTokenizer):
'''
Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by
TAPAS models.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods. [`TapasTokenizer`] creates several token type ids to
encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `segment_ids`,
`column_ids`, `row_ids`, `prev_labels`, `column_ranks`, `inv_column_ranks` and `numeric_relations`:
- segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and
padding.
- column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question
tokens, special tokens and padding.
- row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens,
special tokens and padding. Tokens of column headers are also 0.
- prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in
a conversational setup (such as SQA).
- column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a
column "number of movies" with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2
respectively. 0 for all question tokens, special tokens and padding.
- inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if
you have a column "number of movies" with values 87, 53 and 69, then the inverse column ranks of these tokens are
1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding.
- numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all
question tokens, special tokens and padding.
[`TapasTokenizer`] runs end-to-end tokenization on a table and associated sentences: punctuation splitting and
wordpiece.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
empty_token (`str`, *optional*, defaults to `"[EMPTY]"`):
The token used for empty cell values in a table. Empty cell values include "", "n/a", "nan" and "?".
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
cell_trim_length (`int`, *optional*, defaults to -1):
If > 0: Trim cells so that the length is <= this value. Also disables further cell trimming, should thus be
used with `truncation` set to `True`.
max_column_id (`int`, *optional*):
Max column id to extract.
max_row_id (`int`, *optional*):
Max row id to extract.
strip_column_names (`bool`, *optional*, defaults to `False`):
Whether to add empty strings instead of column names.
update_answer_coordinates (`bool`, *optional*, defaults to `False`):
Whether to recompute the answer coordinates from the answer text.
min_question_length (`int`, *optional*):
Minimum length of each question in terms of tokens (will be skipped otherwise).
max_question_length (`int`, *optional*):
Maximum length of each question in terms of tokens (will be skipped otherwise).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
extra spaces.
'''
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', empty_token='[EMPTY]', tokenize_chinese_chars=True, strip_accents=None, cell_trim_length: int=-1, max_column_id: Optional[int]=None, max_row_id: Optional[int]=None, strip_column_names: bool=False, update_answer_coordinates: bool=False, min_question_length=None, max_question_length=None, model_max_length: int=512, additional_special_tokens: Optional[list[str]]=None, clean_up_tokenization_spaces=True, **kwargs):
pass
@property
def do_lower_case(self):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def create_attention_mask_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
'''
Creates the attention mask according to the query token IDs and a list of table values.
Args:
            query_ids (`list[int]`): list of token IDs corresponding to the query.
            table_values (`list[TableValue]`): list of table values, which are named tuples containing the
                token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the attention mask values.
'''
pass
def create_segment_token_type_ids_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
'''
Creates the segment token type IDs according to the query token IDs and a list of table values.
Args:
            query_ids (`list[int]`): list of token IDs corresponding to the query.
            table_values (`list[TableValue]`): list of table values, which are named tuples containing the
                token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the segment token type IDs values.
'''
pass
def create_column_token_type_ids_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
'''
Creates the column token type IDs according to the query token IDs and a list of table values.
Args:
            query_ids (`list[int]`): list of token IDs corresponding to the query.
            table_values (`list[TableValue]`): list of table values, which are named tuples containing the
                token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the column token type IDs values.
'''
pass
def create_row_token_type_ids_from_sequences(self, query_ids: list[int], table_values: list[TableValue]) -> list[int]:
'''
Creates the row token type IDs according to the query token IDs and a list of table values.
Args:
            query_ids (`list[int]`): list of token IDs corresponding to the query.
            table_values (`list[TableValue]`): list of table values, which are named tuples containing the
                token value, the column ID and the row ID of said token.
Returns:
`list[int]`: List of ints containing the row token type IDs values.
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a question and flattened table for question answering or sequence classification tasks
by concatenating and adding special tokens.
Args:
token_ids_0 (`list[int]`): The ids of the question.
token_ids_1 (`list[int]`, *optional*): The ids of the flattened table.
Returns:
`list[int]`: The model input with special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of question IDs.
token_ids_1 (`list[int]`, *optional*):
List of flattened table IDs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
@add_end_docstrings(TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, table: 'pd.DataFrame', queries: Optional[Union[TextInput, PreTokenizedInput, EncodedInput, list[TextInput], list[PreTokenizedInput], list[EncodedInput]]]=None, answer_coordinates: Optional[Union[list[tuple], list[list[tuple]]]]=None, answer_text: Optional[Union[list[TextInput], list[list[TextInput]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) related to a table.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
queries (`str` or `list[str]`):
Question or batch of questions related to a table to be encoded. Note that in case of a batch, all
questions must refer to the **same** table.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. In case only a single table-question pair
is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must
be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The
first column has index 0. In case a batch of table-question pairs is provided, then the
answer_coordinates must be a list of lists of tuples (each list corresponding to a single
table-question pair).
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. In case only a single table-question pair is
provided, then the answer_text must be a single list of one or more strings. Each string must be the
answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided,
then the answer_coordinates must be a list of lists of strings (each list corresponding to a single
table-question pair).
'''
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, table: 'pd.DataFrame', queries: Optional[Union[list[TextInput], list[PreTokenizedInput], list[EncodedInput]]]=None, answer_coordinates: Optional[list[list[tuple]]]=None, answer_text: Optional[list[list[TextInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Prepare a table and a list of strings for the model.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
queries (`list[str]`):
Batch of questions related to a table to be encoded. Note that all questions must refer to the **same**
table.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. Each tuple must be a (row_index,
column_index) pair. The first data row (not the column header row) has index 0. The first column has
index 0. The answer_coordinates must be a list of lists of tuples (each list corresponding to a single
table-question pair).
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. In case a batch of table-question pairs is
provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a
single table-question pair). Each string must be the answer text of a corresponding answer coordinate.
'''
pass
def _get_question_tokens(self, query):
'''Tokenizes the query, taking into account the max and min question length.'''
pass
def _batch_encode_plus(self, table, queries: Union[list[TextInput], list[PreTokenizedInput], list[EncodedInput]], answer_coordinates: Optional[list[list[tuple]]]=None, answer_text: Optional[list[list[TextInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _batch_prepare_for_model(self, raw_table: 'pd.DataFrame', raw_queries: Union[list[TextInput], list[PreTokenizedInput], list[EncodedInput]], tokenized_table: Optional[TokenizedTable]=None, queries_tokens: Optional[list[list[str]]]=None, answer_coordinates: Optional[list[list[tuple]]]=None, answer_text: Optional[list[list[TextInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=True, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
def encode(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]:
'''
Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc.
which are necessary for the model to work correctly. Use that method if you want to build your processing on
your own, otherwise refer to `__call__`.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
query (`str` or `list[str]`):
Question related to a table to be encoded.
'''
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, answer_coordinates: Optional[list[tuple]]=None, answer_text: Optional[list[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Prepare a table and a string for the model.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
query (`str` or `list[str]`):
Question related to a table to be encoded.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
(not the column header row) has index 0. The first column has index 0.
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
more strings. Each string must be the answer text of a corresponding answer coordinate.
'''
pass
def _encode_plus(self, table: 'pd.DataFrame', query: Union[TextInput, PreTokenizedInput, EncodedInput], answer_coordinates: Optional[list[tuple]]=None, answer_text: Optional[list[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=True, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs):
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, raw_table: 'pd.DataFrame', raw_query: Union[TextInput, PreTokenizedInput, EncodedInput], tokenized_table: Optional[TokenizedTable]=None, query_tokens: Optional[TokenizedTable]=None, answer_coordinates: Optional[list[tuple]]=None, answer_text: Optional[list[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=True, return_attention_mask: Optional[bool]=True, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
'''
Prepares a sequence of input id so that it can be used by the model. It adds special tokens, truncates
sequences if overflowing while taking into account the special tokens.
Args:
raw_table (`pd.DataFrame`):
The original table before any transformation (like tokenization) was applied to it.
raw_query (`TextInput` or `PreTokenizedInput` or `EncodedInput`):
The original query before any transformation (like tokenization) was applied to it.
tokenized_table (`TokenizedTable`):
The table after tokenization.
query_tokens (`list[str]`):
The query after tokenization.
answer_coordinates (`list[Tuple]` or `list[list[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
(not the column header row) has index 0. The first column has index 0.
answer_text (`list[str]` or `list[list[str]]`, *optional*):
Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
more strings. Each string must be the answer text of a corresponding answer coordinate.
'''
pass
def _get_truncated_table_rows(self, query_tokens: list[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy]) -> tuple[int, int]:
'''
Truncates a sequence pair in-place following the strategy.
Args:
query_tokens (`list[str]`):
List of strings corresponding to the tokenized query.
tokenized_table (`TokenizedTable`):
Tokenized table
num_rows (`int`):
Total number of table rows
num_columns (`int`):
Total number of table columns
max_length (`int`):
Total maximum length.
            truncation_strategy (`str` or [`TapasTruncationStrategy`]):
Truncation strategy to use. Seeing as this method should only be called when truncating, the only
available strategy is the `"drop_rows_to_fit"` strategy.
Returns:
`Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available
for each table element.
'''
pass
def _tokenize_table(self, table=None):
'''
Tokenizes column headers and cell texts of a table.
Args:
            table (`pd.Dataframe`):
                Table.
        Returns:
            `TokenizedTable`: TokenizedTable object.
'''
pass
def _question_encoding_cost(self, question_tokens):
pass
def _get_token_budget(self, question_tokens, max_length=None):
'''
Computes the number of tokens left for the table after tokenizing a question, taking into account the max
sequence length of the model.
Args:
            question_tokens (`list[str]`):
                List of question tokens.
        Returns:
            `int`: the number of tokens left for the table, given the model max length.
'''
pass
def _get_table_values(self, table, num_columns, num_rows, num_tokens) -> Generator[TableValue, None, None]:
'''Iterates over partial table and returns token, column and row indexes.'''
pass
def _get_table_boundaries(self, table):
'''Return maximal number of rows, columns and tokens.'''
pass
def _get_table_cost(self, table, num_columns, num_rows, num_tokens):
pass
def _get_max_num_tokens(self, question_tokens, tokenized_table, num_columns, num_rows, max_length):
'''Computes max number of tokens that can be squeezed into the budget.'''
pass
def _get_num_columns(self, table):
pass
def _get_num_rows(self, table, drop_rows_to_fit):
pass
def _serialize_text(self, question_tokens):
'''Serializes texts in index arrays.'''
pass
    def _serialize(self, question_tokens, table, num_columns, num_rows, num_tokens):
'''Serializes table and text.'''
pass
def _get_column_values(self, table, col_index):
pass
def _get_cell_token_indexes(self, column_ids, row_ids, column_id, row_id):
pass
def _get_numeric_column_ranks(self, column_ids, row_ids, table):
'''Returns column ranks for all numeric columns.'''
pass
def _get_numeric_sort_key_fn(self, table_numeric_values, value):
'''
Returns the sort key function for comparing value to table values. The function returned will be a suitable
input for the key param of the sort(). See number_annotation_utils._get_numeric_sort_key_fn for details
Args:
table_numeric_values: Numeric values of a column
value: Numeric value in the question
Returns:
A function key function to compare column and question values.
'''
pass
def _get_numeric_relations(self, question, column_ids, row_ids, table):
'''
Returns numeric relations embeddings
Args:
question: Question object.
column_ids: Maps word piece position to column id.
row_ids: Maps word piece position to row id.
table: The table containing the numeric cell values.
'''
pass
def _get_numeric_values(self, table, column_ids, row_ids):
'''Returns numeric values for computation of answer loss.'''
pass
def _get_numeric_values_scale(self, table, column_ids, row_ids):
'''Returns a scale to each token to down weigh the value of long words.'''
pass
def _pad_to_seq_length(self, inputs):
pass
def _get_all_answer_ids_from_coordinates(self, column_ids, row_ids, answers_list):
'''Maps lists of answer coordinates to token indexes.'''
pass
    def _get_all_answer_ids(self, column_ids, row_ids, answer_coordinates):
'''
Maps answer coordinates of a question to token indexes.
In the SQA format (TSV), the coordinates are given as (row, column) tuples. Here, we first swap them to
(column, row) format before calling _get_all_answer_ids_from_coordinates.
'''
pass
def _to_coordinates(answer_coordinates_question):
pass
def _find_tokens(self, text, segment):
'''Return start index of segment in text or None.'''
pass
def _find_answer_coordinates_from_answer_text(self, tokenized_table, answer_text):
'''Returns all occurrences of answer_text in the table.'''
pass
def _find_answer_ids_from_answer_texts(self, column_ids, row_ids, tokenized_table, answer_texts):
'''Maps question with answer texts to the first matching token indexes.'''
pass
def _get_answer_ids(self, column_ids, row_ids, answer_coordinates):
'''Maps answer coordinates of a question to token indexes.'''
pass
def get_answer_ids(self, column_ids, row_ids, tokenized_table, answer_texts_question, answer_coordinates_question):
pass
    def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def _get_cell_token_probs(self, probabilities, segment_ids, row_ids, column_ids):
pass
def _get_mean_cell_probs(self, probabilities, segment_ids, row_ids, column_ids):
'''Computes average probability per cell, aggregating over tokens.'''
pass
def convert_logits_to_predictions(self, data, logits, logits_agg=None, cell_classification_threshold=0.5):
'''
Converts logits of [`TapasForQuestionAnswering`] to actual predicted answer coordinates and optional
aggregation indices.
The original implementation, on which this function is based, can be found
[here](https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288).
Args:
data (`dict`):
Dictionary mapping features to actual values. Should be created using [`TapasTokenizer`].
logits (`torch.Tensor` of shape `(batch_size, sequence_length)`):
Tensor containing the logits at the token level.
logits_agg (`torch.Tensor` of shape `(batch_size, num_aggregation_labels)`, *optional*):
Tensor containing the aggregation logits.
cell_classification_threshold (`float`, *optional*, defaults to 0.5):
Threshold to be used for cell selection. All table cells for which their probability is larger than
this threshold will be selected.
Returns:
`tuple` comprising various elements depending on the inputs:
- predicted_answer_coordinates (`list[list[[tuple]]` of length `batch_size`): Predicted answer coordinates
as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a
single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index).
- predicted_aggregation_indices (`list[int]`of length `batch_size`, *optional*, returned when
`logits_aggregation` is provided): Predicted aggregation operator indices of the aggregation head.
'''
pass
| 64
| 38
| 30
| 3
| 22
| 6
| 4
| 0.32
| 1
| 24
| 8
| 0
| 55
| 12
| 55
| 144
| 1,835
| 212
| 1,235
| 530
| 893
| 394
| 611
| 243
| 554
| 25
| 3
| 6
| 246
|
5,612
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
|
transformers.models.tapas.tokenization_tapas.TapasTruncationStrategy
|
from ...utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available, logging
class TapasTruncationStrategy(ExplicitEnum):
"""
Possible values for the `truncation` argument in [`~TapasTokenizer.__call__`]. Useful for tab-completion in an IDE.
"""
DROP_ROWS_TO_FIT = 'drop_rows_to_fit'
DO_NOT_TRUNCATE = 'do_not_truncate'
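A tiny illustrative sketch (not part of the original module) of why an `ExplicitEnum` is used here: the plain string and the enum member are interchangeable, which is what lets callers pass `truncation="drop_rows_to_fit"` directly to [`~TapasTokenizer.__call__`]:
strategy = TapasTruncationStrategy("drop_rows_to_fit")
assert strategy is TapasTruncationStrategy.DROP_ROWS_TO_FIT
assert TapasTruncationStrategy("do_not_truncate") is TapasTruncationStrategy.DO_NOT_TRUNCATE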
|
class TapasTruncationStrategy(ExplicitEnum):
'''
Possible values for the `truncation` argument in [`~TapasTokenizer.__call__`]. Useful for tab-completion in an IDE.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 1
| 3
| 3
| 2
| 3
| 3
| 3
| 2
| 0
| 1
| 0
| 0
|
5,613
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
|
transformers.models.tapas.tokenization_tapas.TokenCoordinates
|
from dataclasses import dataclass
@dataclass(frozen=True)
class TokenCoordinates:
column_index: int
row_index: int
token_index: int
|
@dataclass(frozen=True)
class TokenCoordinates:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0
| 4
| 1
| 3
| 0
| 4
| 1
| 3
| 0
| 0
| 0
| 0
|
5,614
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
|
transformers.models.tapas.tokenization_tapas.TokenizedTable
|
from dataclasses import dataclass
@dataclass
class TokenizedTable:
rows: list[list[list[str]]]
selected_tokens: list[TokenCoordinates]
|
@dataclass
class TokenizedTable:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 0
| 0
| 0
|
5,615
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/tokenization_tapas.py
|
transformers.models.tapas.tokenization_tapas.WordpieceTokenizer
|
class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through *BasicTokenizer*.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = ''.join(chars[start:end])
if start > 0:
substr = '##' + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
|
class WordpieceTokenizer:
'''Runs WordPiece tokenization.'''
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
pass
def tokenize(self, text):
'''
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through *BasicTokenizer*.
Returns:
A list of wordpiece tokens.
'''
pass
| 3
| 2
| 26
| 3
| 18
| 6
| 5
| 0.33
| 0
| 1
| 0
| 0
| 2
| 3
| 2
| 2
| 55
| 8
| 36
| 15
| 33
| 12
| 35
| 15
| 32
| 9
| 0
| 4
| 10
|
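A minimal sketch of the greedy longest-match-first behaviour documented above, using the `WordpieceTokenizer` defined in this module; the toy vocabulary is an assumption chosen so that the docstring's "unaffable" example goes through.

```python
from transformers.models.tapas.tokenization_tapas import WordpieceTokenizer

vocab = {"un", "##aff", "##able", "[UNK]"}  # toy vocabulary, illustration only
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(tokenizer.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(tokenizer.tokenize("qzx"))        # ['[UNK]'] -- no wordpiece matches the start of the token
```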
5,616
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/configuration_textnet.py
|
transformers.models.textnet.configuration_textnet.TextNetConfig
|
from transformers.utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
from transformers import PretrainedConfig
class TextNetConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TextNextModel`]. It is used to instantiate a
TextNext model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[czczup/textnet-base](https://huggingface.co/czczup/textnet-base). Configuration objects inherit from
[`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
for more information.
Args:
stem_kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the initial convolution layer.
stem_stride (`int`, *optional*, defaults to 2):
The stride for the initial convolution layer.
stem_num_channels (`int`, *optional*, defaults to 3):
The number of input channels for the initial convolution layer.
stem_out_channels (`int`, *optional*, defaults to 64):
The number of output channels for the initial convolution layer.
stem_act_func (`str`, *optional*, defaults to `"relu"`):
The activation function for the initial convolution layer.
image_size (`tuple[int, int]`, *optional*, defaults to `[640, 640]`):
The size (resolution) of each image.
conv_layer_kernel_sizes (`list[list[list[int]]]`, *optional*):
A list of stage-wise kernel sizes. If `None`, defaults to:
`[[[3, 3], [3, 3], [3, 3]], [[3, 3], [1, 3], [3, 3], [3, 1]], [[3, 3], [3, 3], [3, 1], [1, 3]], [[3, 3], [3, 1], [1, 3], [3, 3]]]`.
conv_layer_strides (`list[list[int]]`, *optional*):
A list of stage-wise strides. If `None`, defaults to:
`[[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]]`.
hidden_sizes (`list[int]`, *optional*, defaults to `[64, 64, 128, 256, 512]`):
Dimensionality (hidden size) at each stage.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage.
Examples:
```python
>>> from transformers import TextNetConfig, TextNetBackbone
>>> # Initializing a TextNetConfig
>>> configuration = TextNetConfig()
>>> # Initializing a model (with random weights)
>>> model = TextNetBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'textnet'
def __init__(self, stem_kernel_size=3, stem_stride=2, stem_num_channels=3, stem_out_channels=64, stem_act_func='relu', image_size=[640, 640], conv_layer_kernel_sizes=None, conv_layer_strides=None, hidden_sizes=[64, 64, 128, 256, 512], batch_norm_eps=1e-05, initializer_range=0.02, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
if conv_layer_kernel_sizes is None:
conv_layer_kernel_sizes = [[[3, 3], [3, 3], [3, 3]], [[3, 3], [1, 3], [3, 3], [3, 1]], [[3, 3], [3, 3], [3, 1], [1, 3]], [[3, 3], [3, 1], [1, 3], [3, 3]]]
if conv_layer_strides is None:
conv_layer_strides = [[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]]
self.stem_kernel_size = stem_kernel_size
self.stem_stride = stem_stride
self.stem_num_channels = stem_num_channels
self.stem_out_channels = stem_out_channels
self.stem_act_func = stem_act_func
self.image_size = image_size
self.conv_layer_kernel_sizes = conv_layer_kernel_sizes
self.conv_layer_strides = conv_layer_strides
self.initializer_range = initializer_range
self.hidden_sizes = hidden_sizes
self.batch_norm_eps = batch_norm_eps
self.depths = [len(layer) for layer in self.conv_layer_kernel_sizes]
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, 5)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
|
class TextNetConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TextNextModel`]. It is used to instantiate a
TextNext model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[czczup/textnet-base](https://huggingface.co/czczup/textnet-base). Configuration objects inherit from
[`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
for more information.
Args:
stem_kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the initial convolution layer.
stem_stride (`int`, *optional*, defaults to 2):
The stride for the initial convolution layer.
stem_num_channels (`int`, *optional*, defaults to 3):
The number of input channels for the initial convolution layer.
stem_out_channels (`int`, *optional*, defaults to 64):
The number of output channels for the initial convolution layer.
stem_act_func (`str`, *optional*, defaults to `"relu"`):
The activation function for the initial convolution layer.
image_size (`tuple[int, int]`, *optional*, defaults to `[640, 640]`):
The size (resolution) of each image.
conv_layer_kernel_sizes (`list[list[list[int]]]`, *optional*):
A list of stage-wise kernel sizes. If `None`, defaults to:
`[[[3, 3], [3, 3], [3, 3]], [[3, 3], [1, 3], [3, 3], [3, 1]], [[3, 3], [3, 3], [3, 1], [1, 3]], [[3, 3], [3, 1], [1, 3], [3, 3]]]`.
conv_layer_strides (`list[list[int]]`, *optional*):
A list of stage-wise strides. If `None`, defaults to:
`[[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]]`.
hidden_sizes (`list[int]`, *optional*, defaults to `[64, 64, 128, 256, 512]`):
Dimensionality (hidden size) at each stage.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage.
Examples:
```python
>>> from transformers import TextNetConfig, TextNetBackbone
>>> # Initializing a TextNetConfig
>>> configuration = TextNetConfig()
>>> # Initializing a model (with random weights)
>>> model = TextNetBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, stem_kernel_size=3, stem_stride=2, stem_num_channels=3, stem_out_channels=64, stem_act_func='relu', image_size=[640, 640], conv_layer_kernel_sizes=None, conv_layer_strides=None, hidden_sizes=[64, 64, 128, 256, 512], batch_norm_eps=1e-05, initializer_range=0.02, out_features=None, out_indices=None, **kwargs):
pass
| 2
| 1
| 48
| 5
| 43
| 0
| 3
| 1.11
| 2
| 2
| 0
| 0
| 1
| 15
| 1
| 38
| 108
| 13
| 45
| 33
| 27
| 50
| 22
| 17
| 20
| 3
| 2
| 1
| 3
|
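The `out_features`/`out_indices` behaviour described above is easiest to see through a backbone. The following is a hedged sketch with randomly initialized weights; the exact feature-map shapes are only what the default `hidden_sizes` suggest.

```python
import torch
from transformers import TextNetBackbone, TextNetConfig

config = TextNetConfig(out_features=["stage2", "stage4"])  # select two of the five stages
backbone = TextNetBackbone(config)  # random weights, illustration only

pixel_values = torch.randn(1, 3, 640, 640)
with torch.no_grad():
    feature_maps = backbone(pixel_values).feature_maps

print([fm.shape[1] for fm in feature_maps])  # channel counts of the selected stages
```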
5,617
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/image_processing_textnet.py
|
transformers.models.textnet.image_processing_textnet.TextNetImageProcessor
|
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from typing import Optional, Union
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments
from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format
from ...utils import TensorType, is_vision_available, logging
import numpy as np
class TextNetImageProcessor(BaseImageProcessor):
"""
Constructs a TextNet image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 640}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
size_divisor (`int`, *optional*, defaults to 32):
Ensures height and width are rounded to a multiple of this value after resizing.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `False`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=False, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, list[float]]]=IMAGENET_DEFAULT_STD, do_convert_rgb: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 640}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size, param_name='crop_size')
self.do_resize = do_resize
self.size = size
self.size_divisor = size_divisor
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_convert_rgb = do_convert_rgb
self._valid_processor_keys = ['images', 'do_resize', 'size', 'size_divisor', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'do_convert_rgb', 'return_tensors', 'data_format', 'input_data_format']
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio. Both the height and width are resized to be divisible by 32.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
size_divisor (`int`, *optional*, defaults to `32`):
Ensures height and width are rounded to a multiple of this value after resizing.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
default_to_square (`bool`, *optional*, defaults to `False`):
The value to be passed to `get_size_dict` as `default_to_square` when computing the image size. If the
`size` argument in `get_size_dict` is an `int`, it determines whether to default to a square image or
not. Note that this attribute is not used in computing `crop_size` via calling `get_size_dict`.
"""
if 'shortest_edge' in size:
size = size['shortest_edge']
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
height, width = get_resize_output_image_size(image, size=size, input_data_format=input_data_format, default_to_square=False)
if height % self.size_divisor != 0:
height += self.size_divisor - height % self.size_divisor
if width % self.size_divisor != 0:
width += self.size_divisor - width % self.size_divisor
return resize(image, size=(height, width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
size_divisor (`int`, *optional*, defaults to `32`):
Ensures height and width are rounded to a multiple of this value after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, param_name='size', default_to_square=False)
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
all_images.append(image)
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
|
class TextNetImageProcessor(BaseImageProcessor):
'''
Constructs a TextNet image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 640}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
size_divisor (`int`, *optional*, defaults to 32):
Ensures height and width are rounded to a multiple of this value after resizing.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `False`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=False, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, list[float]]]=IMAGENET_DEFAULT_STD, do_convert_rgb: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio. Both the height and width are resized to be divisible by 32.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
size_divisor (`int`, *optional*, defaults to `32`):
Ensures height and width are rounded to a multiple of this value after resizing.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
default_to_square (`bool`, *optional*, defaults to `False`):
The value to be passed to `get_size_dict` as `default_to_square` when computing the image size. If the
`size` argument in `get_size_dict` is an `int`, it determines whether to default to a square image or
not. Note that this attribute is not used in computing `crop_size` via calling `get_size_dict`.
'''
pass
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
size_divisor (`int`, *optional*, defaults to `32`):
Ensures height and width are rounded to a multiple of this value after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 4
| 3
| 86
| 6
| 54
| 25
| 11
| 0.68
| 1
| 8
| 2
| 0
| 3
| 13
| 3
| 23
| 302
| 24
| 165
| 63
| 119
| 113
| 73
| 21
| 69
| 22
| 3
| 2
| 32
|
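A hedged usage sketch of the preprocessing pipeline documented above: resize the shortest edge towards 640 with both sides rounded up to a multiple of `size_divisor`, rescale, normalize, and return PyTorch tensors. The input image here is synthetic.

```python
import numpy as np
from transformers import TextNetImageProcessor

processor = TextNetImageProcessor()  # defaults: size={"shortest_edge": 640}, size_divisor=32
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # synthetic HWC image

batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # (1, 3, H, W) with H and W divisible by 32
```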
5,618
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetBackbone
|
from torch import Tensor
from transformers.modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from transformers.utils.backbone_utils import BackboneMixin
from typing import Any, Optional, Union
from ...utils import auto_docstring
@auto_docstring(custom_intro='\n TextNet backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class TextNetBackbone(TextNetPreTrainedModel, BackboneMixin):
has_attentions = False
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.textnet = TextNetModel(config)
self.num_features = config.hidden_sizes
self.post_init()
@auto_docstring
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[tuple], BackboneOutput]:
"""
Examples:
```python
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("czczup/textnet-base")
>>> model = AutoBackbone.from_pretrained("czczup/textnet-base")
>>> inputs = processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
outputs = self.textnet(pixel_values, output_hidden_states=True, return_dict=return_dict)
hidden_states = outputs.hidden_states if return_dict else outputs[2]
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
hidden_states = outputs.hidden_states if return_dict else outputs[2]
output += (hidden_states,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
|
@auto_docstring(custom_intro='\n TextNet backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class TextNetBackbone(TextNetPreTrainedModel, BackboneMixin):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[tuple], BackboneOutput]:
'''
Examples:
```python
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("czczup/textnet-base")
>>> model = AutoBackbone.from_pretrained("czczup/textnet-base")
>>> inputs = processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
```'''
pass
| 5
| 1
| 30
| 6
| 15
| 9
| 6
| 0.52
| 2
| 6
| 1
| 0
| 2
| 2
| 2
| 142
| 63
| 13
| 33
| 13
| 26
| 17
| 23
| 10
| 20
| 10
| 3
| 2
| 11
|
5,619
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetConvLayer
|
from transformers.activations import ACT2CLS
import torch
from transformers.models.textnet.configuration_textnet import TextNetConfig
import torch.nn as nn
class TextNetConvLayer(nn.Module):
def __init__(self, config: TextNetConfig):
super().__init__()
self.kernel_size = config.stem_kernel_size
self.stride = config.stem_stride
self.activation_function = config.stem_act_func
padding = (config.stem_kernel_size[0] // 2, config.stem_kernel_size[1] // 2) if isinstance(config.stem_kernel_size, tuple) else config.stem_kernel_size // 2
self.conv = nn.Conv2d(config.stem_num_channels, config.stem_out_channels, kernel_size=config.stem_kernel_size, stride=config.stem_stride, padding=padding, bias=False)
self.batch_norm = nn.BatchNorm2d(config.stem_out_channels, config.batch_norm_eps)
self.activation = nn.Identity()
if self.activation_function is not None:
self.activation = ACT2CLS[self.activation_function]()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.batch_norm(hidden_states)
return self.activation(hidden_states)
|
class TextNetConvLayer(nn.Module):
def __init__(self, config: TextNetConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 15
| 2
| 13
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 6
| 2
| 12
| 32
| 5
| 27
| 10
| 24
| 0
| 16
| 10
| 13
| 3
| 1
| 1
| 4
|
5,620
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetEncoder
|
from transformers.modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
import torch.nn as nn
from typing import Any, Optional, Union
from transformers.models.textnet.configuration_textnet import TextNetConfig
import torch
class TextNetEncoder(nn.Module):
def __init__(self, config: TextNetConfig):
super().__init__()
stages = []
num_stages = len(config.conv_layer_kernel_sizes)
for stage_ix in range(num_stages):
stages.append(TextNetStage(config, stage_ix))
self.stages = nn.ModuleList(stages)
def forward(self, hidden_state: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithNoAttention:
hidden_states = [hidden_state]
for stage in self.stages:
hidden_state = stage(hidden_state)
hidden_states.append(hidden_state)
if not return_dict:
output = (hidden_state,)
return output + (hidden_states,) if output_hidden_states else output
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
|
class TextNetEncoder(nn.Module):
def __init__(self, config: TextNetConfig):
pass
def forward(self, hidden_state: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithNoAttention:
pass
| 3
| 0
| 13
| 2
| 11
| 0
| 3
| 0
| 1
| 7
| 1
| 0
| 2
| 1
| 2
| 12
| 27
| 5
| 22
| 15
| 14
| 0
| 17
| 10
| 14
| 4
| 1
| 1
| 6
|
5,621
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetForImageClassification
|
from ...utils import auto_docstring
import torch
from transformers.modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from typing import Any, Optional, Union
import torch.nn as nn
@auto_docstring(custom_intro='\n TextNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class TextNetForImageClassification(TextNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.textnet = TextNetModel(config)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.flatten = nn.Flatten()
self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
self.classifier = nn.ModuleList([self.avg_pool, self.flatten])
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import torch
>>> import requests
>>> from transformers import TextNetForImageClassification, TextNetImageProcessor
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = TextNetImageProcessor.from_pretrained("czczup/textnet-base")
>>> model = TextNetForImageClassification.from_pretrained("czczup/textnet-base")
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> outputs.logits.shape
torch.Size([1, 2])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.textnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
for layer in self.classifier:
last_hidden_state = layer(last_hidden_state)
logits = self.fc(last_hidden_state)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n TextNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class TextNetForImageClassification(TextNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import torch
>>> import requests
>>> from transformers import TextNetForImageClassification, TextNetImageProcessor
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = TextNetImageProcessor.from_pretrained("czczup/textnet-base")
>>> model = TextNetForImageClassification.from_pretrained("czczup/textnet-base")
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> outputs.logits.shape
torch.Size([1, 2])
```'''
pass
| 5
| 1
| 41
| 6
| 24
| 12
| 8
| 0.48
| 1
| 5
| 1
| 0
| 2
| 6
| 2
| 130
| 86
| 12
| 50
| 23
| 39
| 24
| 37
| 16
| 34
| 13
| 3
| 3
| 15
|
5,622
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetModel
|
from transformers.modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...utils import auto_docstring
from typing import Any, Optional, Union
import torch.nn as nn
from torch import Tensor
@auto_docstring
class TextNetModel(TextNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.stem = TextNetConvLayer(config)
self.encoder = TextNetEncoder(config)
self.pooler = nn.AdaptiveAvgPool2d((2, 2))
self.post_init()
@auto_docstring
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[Any, list[Any]], tuple[Any], BaseModelOutputWithPoolingAndNoAttention]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
hidden_state = self.stem(pixel_values)
encoder_outputs = self.encoder(hidden_state, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
if not return_dict:
output = (last_hidden_state, pooled_output)
return output + (encoder_outputs[1],) if output_hidden_states else output
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs[1] if output_hidden_states else None)
|
@auto_docstring
class TextNetModel(TextNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[Any, list[Any]], tuple[Any], BaseModelOutputWithPoolingAndNoAttention]:
pass
| 5
| 0
| 16
| 3
| 14
| 0
| 4
| 0
| 1
| 7
| 2
| 0
| 2
| 3
| 2
| 130
| 42
| 6
| 36
| 14
| 23
| 0
| 18
| 11
| 15
| 6
| 3
| 1
| 7
|
5,623
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetPreTrainedModel
|
import torch.nn as nn
from transformers.models.textnet.configuration_textnet import TextNetConfig
from transformers import PreTrainedModel
from ...utils import auto_docstring
@auto_docstring
class TextNetPreTrainedModel(PreTrainedModel):
config: TextNetConfig
base_model_prefix = 'textnet'
main_input_name = 'pixel_values'
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1.0)
if module.bias is not None:
module.bias.data.zero_()
|
@auto_docstring
class TextNetPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 9
| 0
| 9
| 0
| 5
| 0.31
| 1
| 0
| 0
| 3
| 1
| 0
| 1
| 128
| 19
| 2
| 13
| 5
| 11
| 4
| 12
| 5
| 10
| 5
| 2
| 2
| 5
|
5,624
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetRepConvLayer
|
import torch.nn as nn
from transformers.models.textnet.configuration_textnet import TextNetConfig
import torch
class TextNetRepConvLayer(nn.Module):
"""
This layer supports re-parameterization by combining multiple convolutional branches
(e.g., main convolution, vertical, horizontal, and identity branches) during training.
At inference time, these branches can be collapsed into a single convolution for
efficiency, as per the re-parameterization paradigm.
The "Rep" in the name stands for "re-parameterization" (introduced by RepVGG).
"""
def __init__(self, config: TextNetConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int):
super().__init__()
self.num_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2)
self.activation_function = nn.ReLU()
self.main_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
self.main_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps)
vertical_padding = ((kernel_size[0] - 1) // 2, 0)
horizontal_padding = (0, (kernel_size[1] - 1) // 2)
if kernel_size[1] != 1:
self.vertical_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(kernel_size[0], 1), stride=stride, padding=vertical_padding, bias=False)
self.vertical_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps)
else:
self.vertical_conv, self.vertical_batch_norm = (None, None)
if kernel_size[0] != 1:
self.horizontal_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, kernel_size[1]), stride=stride, padding=horizontal_padding, bias=False)
self.horizontal_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps)
else:
self.horizontal_conv, self.horizontal_batch_norm = (None, None)
self.rbr_identity = nn.BatchNorm2d(num_features=in_channels, eps=config.batch_norm_eps) if out_channels == in_channels and stride == 1 else None
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
main_outputs = self.main_conv(hidden_states)
main_outputs = self.main_batch_norm(main_outputs)
if self.vertical_conv is not None:
vertical_outputs = self.vertical_conv(hidden_states)
vertical_outputs = self.vertical_batch_norm(vertical_outputs)
main_outputs = main_outputs + vertical_outputs
if self.horizontal_conv is not None:
horizontal_outputs = self.horizontal_conv(hidden_states)
horizontal_outputs = self.horizontal_batch_norm(horizontal_outputs)
main_outputs = main_outputs + horizontal_outputs
if self.rbr_identity is not None:
id_out = self.rbr_identity(hidden_states)
main_outputs = main_outputs + id_out
return self.activation_function(main_outputs)
|
class TextNetRepConvLayer(nn.Module):
'''
This layer supports re-parameterization by combining multiple convolutional branches
(e.g., main convolution, vertical, horizontal, and identity branches) during training.
At inference time, these branches can be collapsed into a single convolution for
efficiency, as per the re-parameterization paradigm.
The "Rep" in the name stands for "re-parameterization" (introduced by RepVGG).
'''
def __init__(self, config: TextNetConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 39
| 6
| 32
| 1
| 4
| 0.14
| 1
| 4
| 0
| 0
| 2
| 12
| 2
| 12
| 88
| 15
| 64
| 22
| 61
| 9
| 37
| 22
| 34
| 4
| 1
| 1
| 8
|
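The docstring above says the parallel branches can be collapsed into a single convolution at inference time. That collapse rests on the standard conv + batch-norm fold (RepVGG-style); the sketch below is that generic fold, not a method defined on the class above.

```python
import torch
import torch.nn as nn


def fuse_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Fold an eval-mode BatchNorm2d into the preceding bias-free Conv2d."""
    std = torch.sqrt(bn.running_var + bn.eps)
    scale = bn.weight / std
    fused = nn.Conv2d(
        conv.in_channels, conv.out_channels, conv.kernel_size,
        stride=conv.stride, padding=conv.padding, bias=True,
    )
    # y = gamma * (Wx - mu) / std + beta  ==  (scale * W) x + (beta - scale * mu)
    fused.weight.data = conv.weight.detach() * scale.reshape(-1, 1, 1, 1)
    fused.bias.data = bn.bias.detach() - bn.running_mean * scale
    return fused


conv = nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(16).eval()  # eval mode: running statistics, as at inference
x = torch.randn(2, 8, 32, 32)
with torch.no_grad():
    assert torch.allclose(bn(conv(x)), fuse_conv_bn(conv, bn)(x), atol=1e-5)
```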
5,625
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/textnet/modeling_textnet.py
|
transformers.models.textnet.modeling_textnet.TextNetStage
|
import torch.nn as nn
from transformers.models.textnet.configuration_textnet import TextNetConfig
class TextNetStage(nn.Module):
def __init__(self, config: TextNetConfig, depth: int):
super().__init__()
kernel_size = config.conv_layer_kernel_sizes[depth]
stride = config.conv_layer_strides[depth]
num_layers = len(kernel_size)
stage_in_channel_size = config.hidden_sizes[depth]
stage_out_channel_size = config.hidden_sizes[depth + 1]
in_channels = [stage_in_channel_size] + [stage_out_channel_size] * (num_layers - 1)
out_channels = [stage_out_channel_size] * num_layers
stage = []
for stage_config in zip(in_channels, out_channels, kernel_size, stride):
stage.append(TextNetRepConvLayer(config, *stage_config))
self.stage = nn.ModuleList(stage)
def forward(self, hidden_state):
for block in self.stage:
hidden_state = block(hidden_state)
return hidden_state
|
class TextNetStage(nn.Module):
def __init__(self, config: TextNetConfig, depth: int):
pass
def forward(self, hidden_state):
pass
| 3
| 0
| 10
| 2
| 9
| 0
| 2
| 0
| 1
| 5
| 1
| 0
| 2
| 1
| 2
| 12
| 22
| 4
| 18
| 14
| 15
| 0
| 18
| 14
| 15
| 2
| 1
| 1
| 4
|
5,626
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py
|
transformers.models.time_series_transformer.configuration_time_series_transformer.TimeSeriesTransformerConfig
|
from ...configuration_utils import PretrainedConfig
from typing import Optional, Union
class TimeSeriesTransformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to
instantiate a Time Series Transformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Time Series
Transformer
[huggingface/time-series-transformer-tourism-monthly](https://huggingface.co/huggingface/time-series-transformer-tourism-monthly)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
typically dictated by the dataset, and we recommend setting it appropriately.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If `None`, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates, often dictated by the frequency of the data. Default is
`[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it appropriately based on the dataset.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
Example:
```python
>>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
>>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction
>>> configuration = TimeSeriesTransformerConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = TimeSeriesTransformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'time_series_transformer'
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers'}
def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: list[int]=[1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]]='mean', num_dynamic_real_features: int=0, num_static_categorical_features: int=0, num_static_real_features: int=0, num_time_features: int=0, cardinality: Optional[list[int]]=None, embedding_dimension: Optional[list[int]]=None, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, is_encoder_decoder: bool=True, activation_function: str='gelu', d_model: int=64, dropout: float=0.1, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache=True, **kwargs):
self.prediction_length = prediction_length
self.context_length = context_length or prediction_length
self.distribution_output = distribution_output
self.loss = loss
self.input_size = input_size
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence
self.scaling = scaling
self.num_dynamic_real_features = num_dynamic_real_features
self.num_static_real_features = num_static_real_features
self.num_static_categorical_features = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(cardinality) != num_static_categorical_features:
raise ValueError('The cardinality should be a list of the same length as `num_static_categorical_features`')
self.cardinality = cardinality
else:
self.cardinality = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(embedding_dimension) != num_static_categorical_features:
raise ValueError('The embedding dimension should be a list of the same length as `num_static_categorical_features`')
self.embedding_dimension = embedding_dimension
else:
self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
self.num_parallel_samples = num_parallel_samples
self.feature_size = input_size * len(lags_sequence) + self._number_of_features
self.d_model = d_model
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _number_of_features(self) -> int:
return sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2
|
class TimeSeriesTransformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to
instantiate a Time Series Transformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Time Series
Transformer
[huggingface/time-series-transformer-tourism-monthly](https://huggingface.co/huggingface/time-series-transformer-tourism-monthly)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
typically dictated by the dataset, and we recommend setting it appropriately.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If `None`, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates often dictated by the frequency of the data. Default is
`[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it as appropriate for the dataset.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
Example:
```python
>>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
>>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction
>>> configuration = TimeSeriesTransformerConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = TimeSeriesTransformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: list[int]=[1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]]='mean', num_dynamic_real_features: int=0, num_static_categorical_features: int=0, num_static_real_features: int=0, num_time_features: int=0, cardinality: Optional[list[int]]=None, embedding_dimension: Optional[list[int]]=None, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, is_encoder_decoder: bool=True, activation_function: str='gelu', d_model: int=64, dropout: float=0.1, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache=True, **kwargs):
pass
@property
def _number_of_features(self) -> int:
pass
| 4 | 1 | 47 | 3 | 44 | 2 | 3 | 0.97 | 1 | 6 | 0 | 0 | 2 | 30 | 2 | 2 | 201 | 15 | 95 | 69 | 58 | 92 | 45 | 35 | 42 | 5 | 1 | 2 | 6
|
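The config record above shows how `feature_size` and `_number_of_features` are derived from the constructor arguments. A minimal sketch of that derivation, assuming the `transformers` package is installed (the argument values below are illustrative only):

```python
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    lags_sequence=[1, 2, 3, 7],
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[366],
    embedding_dimension=[4],
)

# context_length falls back to prediction_length only when it is None
print(config.context_length)  # 48
# _number_of_features = sum(embedding_dimension) + dynamic real + time + static real + 2 * input_size
print(config._number_of_features)  # 4 + 0 + 2 + 0 + 2 = 8
# feature_size = input_size * len(lags_sequence) + _number_of_features
print(config.feature_size)  # 1 * 4 + 8 = 12
```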
5,627
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder
|
from torch import nn
import torch
class TimeSeriesFeatureEmbedder(nn.Module):
"""
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
"""
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
super().__init__()
self.num_features = len(cardinalities)
self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.num_features > 1:
cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
else:
cat_feature_slices = [features]
return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)
|
class TimeSeriesFeatureEmbedder(nn.Module):
'''
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
'''
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 10 | 1 | 8 | 1 | 2 | 0.59 | 1 | 4 | 0 | 0 | 2 | 2 | 2 | 12 | 32 | 5 | 17 | 6 | 14 | 10 | 10 | 6 | 7 | 2 | 1 | 1 | 3
|
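A standalone sketch of the chunk-and-embed logic in `TimeSeriesFeatureEmbedder` above, written against plain PyTorch rather than the library class; the cardinalities, embedding sizes, and feature codes are made up:

```python
import torch
from torch import nn

cardinalities = [10, 20]  # two categorical features
embedding_dims = [3, 5]

embedders = nn.ModuleList(
    [nn.Embedding(card, dim) for card, dim in zip(cardinalities, embedding_dims)]
)

# (batch, num_features) integer codes, one column per categorical feature
features = torch.tensor([[1, 4], [9, 19]])

# split into one slice per feature, embed each slice, concatenate on the last dim
slices = torch.chunk(features, len(cardinalities), dim=-1)
embedded = torch.cat(
    [embed(s.squeeze(-1)) for embed, s in zip(embedders, slices)], dim=-1
)
print(embedded.shape)  # torch.Size([2, 8]) -> 3 + 5 embedding dims
```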
5,628
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler
|
import torch
from torch import nn
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
class TimeSeriesMeanScaler(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10
self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
scale = torch.where(num_observed > 0, scale, default_scale)
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return (scaled_data, torch.zeros_like(scale), scale)
|
class TimeSeriesMeanScaler(nn.Module):
'''
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
'''
def __init__(self, config: TimeSeriesTransformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3 | 2 | 23 | 3 | 12 | 8 | 4 | 0.76 | 1 | 3 | 1 | 0 | 2 | 4 | 2 | 12 | 52 | 8 | 25 | 16 | 20 | 19 | 22 | 14 | 19 | 5 | 1 | 1 | 8
|
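The mean scaler above divides each series by its weighted average absolute value, skipping unobserved steps. A simplified standalone sketch of that computation (it omits the `default_scale` fallback used when a series has no observed values):

```python
import torch

data = torch.tensor([[[1.0], [-3.0], [0.0], [4.0]]])     # (batch=1, time=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [0.0], [1.0]]])  # third step is unobserved

ts_sum = (data * observed).abs().sum(dim=1, keepdim=True)  # |1| + |-3| + |4| = 8
num_observed = observed.sum(dim=1, keepdim=True)           # 3 observed steps
scale = ts_sum / torch.clamp(num_observed, min=1)          # 8 / 3
scale = torch.clamp(scale, min=1e-10)                      # minimum_scale guard

scaled_data = data / scale
print(scale.squeeze().item())  # ~2.667
print(scaled_data.squeeze())   # each value divided by the per-series scale
```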
5,629
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler
|
from torch import nn
from typing import Callable, Optional, Union
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
import torch
class TimeSeriesNOPScaler(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return (data, loc, scale)
|
class TimeSeriesNOPScaler(nn.Module):
'''
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
'''
def __init__(self, config: TimeSeriesTransformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3 | 2 | 10 | 0 | 5 | 5 | 2 | 1.09 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 12 | 25 | 2 | 11 | 9 | 6 | 12 | 9 | 7 | 6 | 3 | 1 | 0 | 4
|
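For contrast, the no-op scaler's contract fits in a few lines: the data passes through unchanged, while `loc` is all zeros and `scale` is all ones, reduced over the time dimension with `keepdim=True` so downstream code sees the same shapes as with the mean and std scalers. A sketch:

```python
import torch

data = torch.randn(2, 5, 3)  # (batch, time, channels)

scale = torch.ones_like(data).mean(dim=1, keepdim=True)  # (2, 1, 3) of ones
loc = torch.zeros_like(data).mean(dim=1, keepdim=True)   # (2, 1, 3) of zeros

print(data.shape, loc.shape, scale.shape)
```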
5,630
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesSinusoidalPositionalEmbedding
|
from torch import nn
from typing import Callable, Optional, Union
import numpy as np
import torch
class TimeSeriesSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
super().__init__(num_positions, embedding_dim)
def _init_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
self.weight = nn.Parameter(out, requires_grad=False)
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(position_ids)
|
class TimeSeriesSinusoidalPositionalEmbedding(nn.Embedding):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
pass
def _init_weight(self):
'''
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
'''
pass
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
'''`input_ids_shape` is expected to be [bsz x seqlen].'''
pass
| 5 | 3 | 8 | 0 | 7 | 2 | 1 | 0.3 | 1 | 4 | 0 | 0 | 2 | 1 | 3 | 3 | 32 | 3 | 23 | 12 | 17 | 7 | 17 | 10 | 13 | 2 | 1 | 0 | 4
|
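A standalone sketch of the non-interleaved sinusoidal table built in `_init_weight` above: sine features fill the first half of each vector and cosine features the second half, and a position id simply indexes a row of the fixed table. The small `n_pos` and `dim` values are illustrative:

```python
import numpy as np
import torch

n_pos, dim = 6, 4
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)

out = torch.empty(n_pos, dim)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))  # first half: sin
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))   # second half: cos

# looking up positions is just row indexing into the table
position_ids = torch.arange(3)
print(out[position_ids].shape)  # torch.Size([3, 4])
```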
5,631
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler
|
from torch import nn
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
import torch
class TimeSeriesStdScaler(nn.Module):
"""
Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by
subtracting from the mean and dividing by the standard deviation.
"""
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return ((data - loc) / scale, loc, scale)
|
class TimeSeriesStdScaler(nn.Module):
'''
Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by
subtracting from the mean and dividing by the standard deviation.
'''
def __init__(self, config: TimeSeriesTransformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3 | 2 | 13 | 1 | 7 | 6 | 3 | 1 | 1 | 3 | 1 | 0 | 2 | 3 | 2 | 12 | 33 | 3 | 15 | 12 | 10 | 15 | 13 | 10 | 10 | 4 | 1 | 0 | 5
|
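A standalone sketch of the masked standardisation performed by `TimeSeriesStdScaler` above, with made-up values so the effect of the observed-indicator mask is visible (the unobserved outlier does not influence `loc` or `scale`):

```python
import torch

data = torch.tensor([[[2.0], [4.0], [100.0], [6.0]]])    # (batch, time, channels)
observed = torch.tensor([[[1.0], [1.0], [0.0], [1.0]]])  # the 100.0 is unobserved
minimum_scale = 1e-5

denominator = observed.sum(1, keepdim=True).clamp_min(1.0)                      # 3
loc = (data * observed).sum(1, keepdim=True) / denominator                      # mean of 2, 4, 6 = 4
variance = (((data - loc) * observed) ** 2).sum(1, keepdim=True) / denominator  # 8 / 3
scale = torch.sqrt(variance + minimum_scale)

print(loc.squeeze().item(), scale.squeeze().item())  # 4.0, ~1.633
print(((data - loc) / scale).squeeze())
```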
5,632
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerAttention
|
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
import torch
class TimeSeriesTransformerAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[TimeSeriesTransformerConfig]=None, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
|
class TimeSeriesTransformerAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[TimeSeriesTransformerConfig]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4 | 2 | 50 | 7 | 35 | 8 | 5 | 0.24 | 1 | 7 | 1 | 0 | 3 | 12 | 3 | 13 | 156 | 23 | 107 | 44 | 86 | 26 | 68 | 27 | 64 | 12 | 1 | 2 | 15
|
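A standalone sketch of the head-splitting and scaled dot-product step used by the attention module above, restricted to the eager path with no masking, caching, or dropout; all sizes are illustrative:

```python
import torch
from torch import nn

bsz, seq, embed_dim, num_heads = 2, 5, 16, 4
head_dim = embed_dim // num_heads
scaling = head_dim ** -0.5

q_proj = nn.Linear(embed_dim, embed_dim)
k_proj = nn.Linear(embed_dim, embed_dim)
v_proj = nn.Linear(embed_dim, embed_dim)
out_proj = nn.Linear(embed_dim, embed_dim)

hidden_states = torch.randn(bsz, seq, embed_dim)

# (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
q = q_proj(hidden_states).view(bsz, seq, num_heads, head_dim).transpose(1, 2)
k = k_proj(hidden_states).view(bsz, seq, num_heads, head_dim).transpose(1, 2)
v = v_proj(hidden_states).view(bsz, seq, num_heads, head_dim).transpose(1, 2)

attn_weights = torch.softmax(q @ k.transpose(-1, -2) * scaling, dim=-1)
attn_output = (attn_weights @ v).transpose(1, 2).reshape(bsz, seq, embed_dim)
print(out_proj(attn_output).shape)  # torch.Size([2, 5, 16])
```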
5,633
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerDecoder
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
import torch
from torch import nn
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`TimeSeriesTransformerDecoderLayer`]
Args:
config: TimeSeriesTransformerConfig
"""
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
if config.prediction_length is None:
raise ValueError('The `prediction_length` config needs to be specified.')
self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)
self.layers = nn.ModuleList([TimeSeriesTransformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_shape = inputs_embeds.size()[:-1]
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device)
attention_mask = self._update_causal_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
|
class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`TimeSeriesTransformerDecoderLayer`]
Args:
config: TimeSeriesTransformerConfig
'''
def __init__(self, config: TimeSeriesTransformerConfig):
pass
def forward(self, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
'''
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 3 | 2 | 103 | 14 | 61 | 28 | 18 | 0.5 | 1 | 13 | 5 | 0 | 2 | 7 | 2 | 3 | 215 | 31 | 123 | 37 | 107 | 61 | 63 | 24 | 60 | 33 | 2 | 3 | 35
|
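One detail worth isolating from the decoder's forward loop above is LayerDrop: during training, each decoder layer can be skipped for the whole batch with probability `decoder_layerdrop`. A sketch of just that control flow:

```python
import torch

layerdrop = 0.1
num_layers = 2
training = True

for idx in range(num_layers):
    if training:
        dropout_probability = torch.rand([])
        if dropout_probability < layerdrop:
            continue  # skip this layer entirely for this forward pass
    print(f"running decoder layer {idx}")
```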
5,634
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerDecoderLayer
|
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...activations import ACT2FN
from torch import nn
import torch
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
class TimeSeriesTransformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TimeSeriesTransformerAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = TimeSeriesTransformerAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
class TimeSeriesTransformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 4 | 1 | 58 | 6 | 40 | 13 | 4 | 0.31 | 1 | 4 | 1 | 0 | 2 | 11 | 2 | 12 | 118 | 12 | 81 | 32 | 67 | 25 | 44 | 21 | 41 | 6 | 1 | 1 | 7
|
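The decoder layer above applies the same residual-then-LayerNorm pattern three times (self-attention, cross-attention, feed-forward). A sketch of the feed-forward sub-block alone, with `relu` standing in for the configured activation (the library uses `ACT2FN[config.activation_function]`, `gelu` by default):

```python
import torch
from torch import nn

d_model, ffn_dim = 16, 32
fc1 = nn.Linear(d_model, ffn_dim)
fc2 = nn.Linear(ffn_dim, d_model)
final_layer_norm = nn.LayerNorm(d_model)

hidden_states = torch.randn(2, 5, d_model)

residual = hidden_states
hidden_states = torch.relu(fc1(hidden_states))               # project up and activate
hidden_states = fc2(hidden_states)                           # project back down
hidden_states = final_layer_norm(residual + hidden_states)   # add residual, then normalise
print(hidden_states.shape)  # torch.Size([2, 5, 16])
```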
5,635
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerEncoder
|
from typing import Callable, Optional, Union
from torch import nn
import torch
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TimeSeriesTransformerEncoderLayer`].
Args:
config: TimeSeriesTransformerConfig
"""
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
if config.prediction_length is None:
raise ValueError('The `prediction_length` config needs to be specified.')
self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)
self.layers = nn.ModuleList([TimeSeriesTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size())
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = self._update_full_mask(attention_mask, inputs_embeds)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TimeSeriesTransformerEncoderLayer`].
Args:
config: TimeSeriesTransformerConfig
'''
def __init__(self, config: TimeSeriesTransformerConfig):
pass
def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
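The `attention_mask` documented in the signature above uses 1 for real tokens and 0 for padding. A generic sketch (not the library's internal `_update_full_mask`) of how such a mask is commonly expanded into the additive `(batch, 1, tgt_len, src_len)` bias consumed by the attention layers:

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])                # (batch, seq_len); 0 marks padding
expanded = attention_mask[:, None, None, :].float()          # (batch, 1, 1, src_len)
additive_bias = (1.0 - expanded) * torch.finfo(torch.float32).min
print(additive_bias.shape)     # torch.Size([1, 1, 1, 4])
print(additive_bias[0, 0, 0])  # large negative value only at the padded position
```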
| 3
| 2
| 63
| 9
| 39
| 15
| 11
| 0.46
| 1
| 12
| 5
| 0
| 2
| 7
| 2
| 3
| 135
| 21
| 79
| 26
| 68
| 36
| 49
| 18
| 46
| 20
| 2
| 3
| 22
|
5,636
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerEncoderLayer
|
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
from ...modeling_layers import GradientCheckpointingLayer
from typing import Callable, Optional, Union
from ...activations import ACT2FN
from torch import nn
import torch
class TimeSeriesTransformerEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TimeSeriesTransformerAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, layer_idx=layer_idx)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
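The final clamp above guards against float16 overflow after the residual additions. A small, self-contained illustration of that guard on a toy tensor (the values are made up purely to force an overflow):

```python
import torch

hidden_states = torch.tensor([70000.0, -70000.0, 1.0]).to(torch.float16)  # 70000 overflows to inf in fp16
if hidden_states.dtype == torch.float16 and (
    torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000  # 65504 - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(hidden_states)  # inf replaced by roughly ±(65504 - 1000), rounded to the nearest fp16 value
```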
|
class TimeSeriesTransformerEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int]=None):
pass
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 33
| 3
| 25
| 6
| 2
| 0.22
| 1
| 3
| 1
| 0
| 2
| 9
| 2
| 12
| 68
| 7
| 50
| 22
| 41
| 11
| 32
| 16
| 29
| 3
| 1
| 1
| 4
|
5,637
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerForPrediction
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
import torch
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
@auto_docstring
class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel):
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__(config)
self.model = TimeSeriesTransformerModel(config)
if config.distribution_output == 'student_t':
self.distribution_output = StudentTOutput(dim=config.input_size)
elif config.distribution_output == 'normal':
self.distribution_output = NormalOutput(dim=config.input_size)
elif config.distribution_output == 'negative_binomial':
self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
else:
raise ValueError(f'Unknown distribution output {config.distribution_output}')
self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
self.target_shape = self.distribution_output.event_shape
if config.loss == 'nll':
self.loss = nll
else:
raise ValueError(f'Unknown loss function {config.loss}')
self.post_init()
def output_params(self, dec_output):
return self.parameter_projection(dec_output)
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
sliced_params = params
if trailing_n is not None:
sliced_params = [p[:, -trailing_n:] for p in params]
return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import TimeSeriesTransformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = TimeSeriesTransformerForPrediction.from_pretrained(
... "huggingface/time-series-transformer-tourism-monthly"
... )
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if future_values is not None:
use_cache = False
outputs = self.model(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, cache_position=cache_position)
prediction_loss = None
params = None
if future_values is not None:
params = self.output_params(outputs[0])
distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
loss = self.loss(distribution, future_values)
if future_observed_mask is None:
future_observed_mask = torch.ones_like(future_values)
if len(self.target_shape) == 0:
loss_weights = future_observed_mask
else:
loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
prediction_loss = weighted_average(loss, weights=loss_weights)
if not return_dict:
outputs = (params,) + outputs[1:] if params is not None else outputs[1:]
return (prediction_loss,) + outputs if prediction_loss is not None else outputs
return Seq2SeqTSPredictionOutput(loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features)
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput:
"""
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires you to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires you to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
"""
outputs = self(static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=future_time_features, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=True)
decoder = self.model.get_decoder()
enc_last_hidden = outputs.encoder_last_hidden_state
loc = outputs.loc
scale = outputs.scale
static_feat = outputs.static_features
num_parallel_samples = self.config.num_parallel_samples
repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_past_values = (past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc) / repeated_scale
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
future_samples = []
for k in range(self.config.prediction_length):
lagged_sequence = self.model.get_lagged_subsequences(sequence=repeated_past_values, subsequences_length=1 + k, shift=1)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, :k + 1]), dim=-1)
dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
dec_last_hidden = dec_output.last_hidden_state
params = self.parameter_projection(dec_last_hidden[:, -1:])
distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
next_sample = distr.sample()
repeated_past_values = torch.cat((repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1)
future_samples.append(next_sample)
concat_future_samples = torch.cat(future_samples, dim=1)
return SampleTSPredictionOutput(sequences=concat_future_samples.reshape((-1, num_parallel_samples, self.config.prediction_length) + self.target_shape))
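In `forward` above, the negative log-likelihood is averaged with `future_observed_mask` as weights so that imputed (missing) targets do not contribute to the loss. A hedged sketch with toy numbers; `weighted_average` below is a local stand-in for the library helper of the same name:

```python
import torch

def weighted_average(values: torch.Tensor, weights: torch.Tensor) -> torch.Tensor:
    # zero out unobserved positions, then normalise by the number of observed ones
    masked = torch.where(weights != 0, values * weights, torch.zeros_like(values))
    return masked.sum() / weights.sum().clamp(min=1.0)

nll = torch.tensor([[0.5, 1.5, 2.0], [1.0, 3.0, 0.2]])       # per-step losses
observed = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]])  # future_observed_mask
print(weighted_average(nll, observed))  # tensor(0.8000): mean over the 4 observed steps
```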
|
@auto_docstring
class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel):
def __init__(self, config: TimeSeriesTransformerConfig):
pass
def output_params(self, dec_output):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import TimeSeriesTransformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = TimeSeriesTransformerForPrediction.from_pretrained(
... "huggingface/time-series-transformer-tourism-monthly"
... )
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```'''
pass
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput:
'''
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires you to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires you to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
'''
pass
| 12
| 2
| 47
| 7
| 24
| 16
| 3
| 0.66
| 1
| 14
| 8
| 0
| 7
| 5
| 7
| 8
| 341
| 58
| 171
| 80
| 129
| 113
| 77
| 47
| 69
| 9
| 2
| 2
| 21
|
5,638
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
|
transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerModel
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
import torch
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
@auto_docstring
class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel):
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__(config)
if config.scaling == 'mean' or config.scaling is True:
self.scaler = TimeSeriesMeanScaler(config)
elif config.scaling == 'std':
self.scaler = TimeSeriesStdScaler(config)
else:
self.scaler = TimeSeriesNOPScaler(config)
if config.num_static_categorical_features > 0:
self.embedder = TimeSeriesFeatureEmbedder(cardinalities=config.cardinality, embedding_dims=config.embedding_dimension)
self.encoder = TimeSeriesTransformerEncoder(config)
self.decoder = TimeSeriesTransformerDecoder(config)
self.post_init()
@property
def _past_length(self) -> int:
return self.config.context_length + max(self.config.lags_sequence)
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
"""
Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),
where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,
j, :, k] = sequence[i, -indices[k]-S+j, :].
Args:
sequence: Tensor
The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
subsequences_length : int
Length of the subsequences to be extracted.
shift: int
Shift the lags by this amount back.
"""
sequence_length = sequence.shape[1]
indices = [lag - shift for lag in self.config.lags_sequence]
if max(indices) + subsequences_length > sequence_length:
raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}')
lagged_values = []
for lag_index in indices:
begin_index = -lag_index - subsequences_length
end_index = -lag_index if lag_index > 0 else None
lagged_values.append(sequence[:, begin_index:end_index, ...])
return torch.stack(lagged_values, dim=-1)
def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None):
time_feat = torch.cat((past_time_features[:, self._past_length - self.config.context_length:, ...], future_time_features), dim=1) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length:, ...]
if past_observed_mask is None:
past_observed_mask = torch.ones_like(past_values)
context = past_values[:, -self.config.context_length:]
observed_context = past_observed_mask[:, -self.config.context_length:]
_, loc, scale = self.scaler(context, observed_context)
inputs = (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale
if loc.ndim == 3:
squeezed_loc = loc.squeeze(1)
squeezed_scale = scale.squeeze(1)
else:
squeezed_loc = loc
squeezed_scale = scale
log_abs_loc = squeezed_loc.abs().log1p()
log_scale = squeezed_scale.log()
static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
if static_real_features is not None:
static_feat = torch.cat((static_real_features, static_feat), dim=1)
if static_categorical_features is not None:
embedded_cat = self.embedder(static_categorical_features)
static_feat = torch.cat((embedded_cat, static_feat), dim=1)
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
features = torch.cat((expanded_static_feat, time_feat), dim=-1)
subsequences_length = self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length
lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
raise ValueError(f'input length {reshaped_lagged_sequence.shape[1]} and time feature length {time_feat.shape[1]} do not match')
transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1)
return (transformer_inputs, loc, scale, static_feat)
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import TimeSeriesTransformerModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_inputs, loc, scale, static_feat = self.create_network_inputs(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features)
if encoder_outputs is None:
enc_input = transformer_inputs[:, :self.config.context_length, ...]
encoder_outputs = self.encoder(inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
if self.config.context_length >= transformer_inputs.shape[1]:
bsz, _, dim = transformer_inputs.shape
dec_input = torch.zeros(size=(bsz, 1, dim), device=transformer_inputs.device, dtype=transformer_inputs.dtype)
else:
dec_input = transformer_inputs[:, self.config.context_length:, ...]
decoder_outputs = self.decoder(inputs_embeds=dec_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
return Seq2SeqTSModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat)
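The time features described in the docstring above (month-of-year, "age", and similar covariates) have to be built by the caller before invoking the model. Below is a minimal, hypothetical sketch of how such features could be assembled for a monthly series; the lengths and scaling choices are illustrative assumptions, not the pipeline used to produce the demo batch.
```python
# Illustrative sketch only: building month-of-year and "age" time features for a monthly series.
import numpy as np
import pandas as pd
import torch

context_length, max_lag, prediction_length = 24, 7, 12           # illustrative values
past_length = context_length + max_lag                           # length expected for past_values

index = pd.period_range("2015-01", periods=past_length + prediction_length, freq="M")
month_of_year = (index.month.values - 1) / 11.0 - 0.5             # roughly scaled to [-0.5, 0.5]
age = np.log1p(np.arange(len(index), dtype=np.float32))           # increases monotonically with time

time_features = torch.tensor(np.stack([month_of_year, age], axis=-1), dtype=torch.float32)
past_time_features = time_features[:past_length].unsqueeze(0)     # (1, past_length, 2)
future_time_features = time_features[past_length:].unsqueeze(0)   # (1, prediction_length, 2)
```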
|
@auto_docstring
class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel):
def __init__(self, config: TimeSeriesTransformerConfig):
pass
@property
def _past_length(self) -> int:
pass
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
'''
Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),
where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,
j, :, k] = sequence[i, -indices[k]-S+j, :].
Args:
sequence: Tensor
The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
subsequences_length : int
Length of the subsequences to be extracted.
shift: int
Shift the lags by this amount back.
'''
pass
def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None):
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import TimeSeriesTransformerModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```'''
pass
| 10 | 2 | 35 | 4 | 24 | 7 | 4 | 0.26 | 1 | 14 | 9 | 0 | 7 | 4 | 7 | 8 | 255 | 34 | 175 | 70 | 134 | 46 | 70 | 38 | 62 | 10 | 2 | 1 | 31 |
5,639 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py | transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerPreTrainedModel |
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from torch import nn
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
from .configuration_time_series_transformer import TimeSeriesTransformerConfig
@auto_docstring
class TimeSeriesTransformerPreTrainedModel(PreTrainedModel):
config: TimeSeriesTransformerConfig
base_model_prefix = 'model'
main_input_name = 'past_values'
supports_gradient_checkpointing = True
_supports_flash_attn = False
_supports_sdpa = False
_supports_flex_attn = False
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, TimeSeriesSinusoidalPositionalEmbedding):
module._init_weight()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int):
if self.config._attn_implementation == 'flash_attention_2':
attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
elif attention_mask is None:
attention_mask = make_flex_block_causal_mask(torch.ones(size=input_shape, device=inputs_embeds.device))
else:
attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if self.config._attn_implementation == 'flash_attention_2':
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
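In the eager (default) branch, the helpers above expand a 2D padding mask into the 4D additive mask that the attention layers consume. The sketch below is a rough, self-contained illustration of that expansion, assuming the usual `(batch, src_len)` mask convention; it mirrors `_prepare_4d_attention_mask` in spirit but is not the library implementation.
```python
# Rough illustration: turn a (batch, src_len) padding mask into a (batch, 1, tgt_len, src_len)
# additive mask with a very large negative value at padded positions.
from typing import Optional

import torch

def expand_padding_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None) -> torch.Tensor:
    batch_size, src_len = mask.shape
    tgt_len = tgt_len if tgt_len is not None else src_len
    expanded = mask[:, None, None, :].expand(batch_size, 1, tgt_len, src_len).to(dtype)
    # 1 -> keep (adds 0.0 to the attention scores), 0 -> mask (adds a -inf-like value)
    return (1.0 - expanded) * torch.finfo(dtype).min

padding_mask = torch.tensor([[1, 1, 1, 0]])                       # one padded position
additive_mask = expand_padding_mask(padding_mask, torch.float32)  # shape (1, 1, 4, 4)
```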
|
@auto_docstring
class TimeSeriesTransformerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int):
pass
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
pass
| 6 | 0 | 12 | 0 | 12 | 0 | 6 | 0 | 1 | 1 | 1 | 4 | 1 | 0 | 1 | 1 | 18 | 1 | 17 | 7 | 15 | 0 | 15 | 7 | 13 | 6 | 1 | 2 | 6 |
5,640 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py | transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding |
from torch import nn
class TimeSeriesValueEmbedding(nn.Module):
def __init__(self, feature_size, d_model):
super().__init__()
self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
def forward(self, x):
return self.value_projection(x)
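A brief, hypothetical usage sketch of this projection; the sizes are arbitrary and only show that the module maps the concatenated per-step features to the model width.
```python
# Hypothetical usage: project (batch, sequence, feature_size) features to d_model.
import torch

embedding = TimeSeriesValueEmbedding(feature_size=9, d_model=32)  # sizes chosen for illustration
features = torch.randn(4, 48, 9)                                  # (batch, sequence, feature_size)
projected = embedding(features)                                   # -> (4, 48, 32)
```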
|
class TimeSeriesValueEmbedding(nn.Module):
def __init__(self, feature_size, d_model):
pass
def forward(self, x):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 12 | 7 | 1 | 6 | 4 | 3 | 0 | 6 | 4 | 3 | 1 | 1 | 0 | 2 |
5,641 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/configuration_timesformer.py | transformers.models.timesformer.configuration_timesformer.TimesformerConfig |
from ...configuration_utils import PretrainedConfig
class TimesformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TimesformerModel`]. It is used to instantiate a
TimeSformer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the TimeSformer
[facebook/timesformer-base-finetuned-k600](https://huggingface.co/facebook/timesformer-base-finetuned-k600)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_frames (`int`, *optional*, defaults to 8):
The number of frames in each video.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
attention_type (`str`, *optional*, defaults to `"divided_space_time"`):
The attention type to use. Must be one of `"divided_space_time"`, `"space_only"`, `"joint_space_time"`.
drop_path_rate (`float`, *optional*, defaults to 0):
The dropout ratio for stochastic depth.
Example:
```python
>>> from transformers import TimesformerConfig, TimesformerModel
>>> # Initializing a TimeSformer timesformer-base style configuration
>>> configuration = TimesformerConfig()
>>> # Initializing a model from the configuration
>>> model = TimesformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'timesformer'
def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, attention_type='divided_space_time', drop_path_rate=0, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.attention_type = attention_type
self.drop_path_rate = drop_path_rate
|
class TimesformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TimesformerModel`]. It is used to instantiate a
TimeSformer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the TimeSformer
[facebook/timesformer-base-finetuned-k600](https://huggingface.co/facebook/timesformer-base-finetuned-k600)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_frames (`int`, *optional*, defaults to 8):
The number of frames in each video.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
attention_type (`str`, *optional*, defaults to `"divided_space_time"`):
The attention type to use. Must be one of `"divided_space_time"`, `"space_only"`, `"joint_space_time"`.
drop_path_rate (`float`, *optional*, defaults to 0):
The dropout ratio for stochastic depth.
Example:
```python
>>> from transformers import TimesformerConfig, TimesformerModel
>>> # Initializing a TimeSformer timesformer-base style configuration
>>> configuration = TimesformerConfig()
>>> # Initializing a model from the configuration
>>> model = TimesformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, attention_type='divided_space_time', drop_path_rate=0, **kwargs):
pass
| 2 | 1 | 40 | 3 | 37 | 0 | 1 | 1.33 | 1 | 1 | 0 | 0 | 1 | 16 | 1 | 1 | 103 | 12 | 39 | 38 | 18 | 52 | 20 | 19 | 18 | 1 | 1 | 0 | 1 |
5,642 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimeSformerAttention |
from typing import Optional, Union
from torch import nn
from .configuration_timesformer import TimesformerConfig
import torch.nn.functional
import torch
class TimeSformerAttention(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.attention = TimesformerSelfAttention(config)
self.output = TimesformerSelfOutput(config)
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
self_outputs = self.attention(hidden_states, output_attentions)
attention_output = self.output(self_outputs[0])
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class TimeSformerAttention(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
pass
| 3 | 0 | 8 | 1 | 7 | 1 | 1 | 0.07 | 1 | 6 | 3 | 0 | 2 | 2 | 2 | 12 | 17 | 3 | 14 | 12 | 7 | 1 | 10 | 8 | 7 | 1 | 1 | 0 | 2 |
5,643 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimeSformerDropPath |
from typing import Optional, Union
import torch
from torch import nn
import torch.nn.functional
class TimeSformerDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
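The `drop_path` function called above is defined elsewhere in the modeling file and is not part of this snippet. A standard stochastic-depth implementation looks roughly like the sketch below (one Bernoulli draw per sample, with rescaling by the keep probability); the library's own version may differ in detail.
```python
# Sketch of a standard stochastic-depth helper (assumed behaviour of `drop_path`).
from typing import Optional

import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: Optional[float] = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob is None or drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1.0 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dimensions
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()
    return hidden_states.div(keep_prob) * random_tensor
```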
|
class TimeSformerDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.13 | 1 | 4 | 0 | 0 | 3 | 1 | 3 | 13 | 12 | 3 | 8 | 5 | 4 | 1 | 8 | 5 | 4 | 1 | 1 | 0 | 3 |
5,644 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerEmbeddings |
from torch import nn
import torch.nn.functional
import torch
class TimesformerEmbeddings(nn.Module):
"""
Construct the patch and position embeddings.
"""
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
num_frames = config.num_frames
drop_rate = config.hidden_dropout_prob
attention_type = config.attention_type
self.attention_type = attention_type
self.patch_embeddings = TimesformerPatchEmbeddings(config)
self.num_patches = self.patch_embeddings.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if attention_type != 'space_only':
self.time_embeddings = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
self.time_drop = nn.Dropout(p=drop_rate)
def forward(self, pixel_values):
batch_size = pixel_values.shape[0]
embeddings, num_frames, patch_width = self.patch_embeddings(pixel_values)
cls_tokens = self.cls_token.expand(embeddings.size(0), -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if embeddings.size(1) != self.position_embeddings.size(1):
position_embeddings = self.position_embeddings
cls_pos_embed = position_embeddings[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = position_embeddings[0, 1:, :].unsqueeze(0).transpose(1, 2)
patch_num = int(other_pos_embed.size(2) ** 0.5)
patch_height = embeddings.size(1) // patch_width
other_pos_embed = other_pos_embed.reshape(1, embeddings.size(2), patch_num, patch_num)
new_pos_embed = nn.functional.interpolate(other_pos_embed, size=(patch_height, patch_width), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
embeddings = embeddings + new_pos_embed
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.pos_drop(embeddings)
if self.attention_type != 'space_only':
cls_tokens = embeddings[:batch_size, 0, :].unsqueeze(1)
embeddings = embeddings[:, 1:]
_, patch_height, patch_width = embeddings.shape
embeddings = embeddings.reshape(batch_size, num_frames, patch_height, patch_width).permute(0, 2, 1, 3).reshape(batch_size * patch_height, num_frames, patch_width)
if num_frames != self.time_embeddings.size(1):
time_embeddings = self.time_embeddings.transpose(1, 2)
new_time_embeddings = nn.functional.interpolate(time_embeddings, size=num_frames, mode='nearest')
new_time_embeddings = new_time_embeddings.transpose(1, 2)
embeddings = embeddings + new_time_embeddings
else:
embeddings = embeddings + self.time_embeddings
embeddings = self.time_drop(embeddings)
embeddings = embeddings.view(batch_size, patch_height, num_frames, patch_width).reshape(batch_size, patch_height * num_frames, patch_width)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
return embeddings
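With the default configuration used elsewhere in this file (224x224 frames, 16x16 patches, 8 frames, hidden size 768), the resulting sequence length can be checked by hand; the snippet below only reproduces that arithmetic and matches the `[1, 1569, 768]` shape quoted in the `TimesformerModel` example further down.
```python
# Sanity check of the embedding sequence length for the default configuration.
image_size, patch_size, num_frames = 224, 16, 8
patches_per_frame = (image_size // patch_size) ** 2   # 14 * 14 = 196
sequence_length = num_frames * patches_per_frame + 1  # 1568 patch tokens + 1 [CLS] token
assert sequence_length == 1569
```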
|
class TimesformerEmbeddings(nn.Module):
'''
Construct the patch and position embeddings.
'''
def __init__(self, config):
pass
def forward(self, pixel_values):
pass
| 3 | 1 | 36 | 4 | 30 | 3 | 3 | 0.13 | 1 | 3 | 1 | 0 | 2 | 8 | 2 | 12 | 78 | 10 | 60 | 27 | 57 | 8 | 50 | 27 | 47 | 4 | 1 | 2 | 6 |
5,645 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerEncoder |
import torch.nn.functional
from .configuration_timesformer import TimesformerConfig
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from torch import nn
class TimesformerEncoder(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([TimesformerLayer(config, ind) for ind in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
|
class TimesformerEncoder(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
pass
| 3 | 0 | 22 | 3 | 19 | 0 | 5 | 0 | 1 | 9 | 3 | 0 | 2 | 3 | 2 | 12 | 45 | 7 | 38 | 16 | 29 | 0 | 23 | 10 | 20 | 9 | 1 | 2 | 10 |
5,646 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerForVideoClassification |
from typing import Optional, Union
from ...utils import auto_docstring, logging
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
import torch.nn.functional
import torch
@auto_docstring(custom_intro='\n TimeSformer Model transformer with a video classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class TimesformerForVideoClassification(TimesformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.timesformer = TimesformerModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import av
>>> import torch
>>> import numpy as np
>>> from transformers import AutoImageProcessor, TimesformerForVideoClassification
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
>>> model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
>>> inputs = image_processor(list(video), return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
... logits = outputs.logits
>>> # model predicts one of the 400 Kinetics-400 classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
eating spaghetti
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.timesformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0][:, 0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n TimeSformer Model transformer with a video classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class TimesformerForVideoClassification(TimesformerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import av
>>> import torch
>>> import numpy as np
>>> from transformers import AutoImageProcessor, TimesformerForVideoClassification
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
>>> model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
>>> inputs = image_processor(list(video), return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
... logits = outputs.logits
>>> # model predicts one of the 400 Kinetics-400 classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
eating spaghetti
```'''
pass
| 5 | 1 | 75 | 13 | 27 | 35 | 7 | 1.25 | 1 | 6 | 2 | 0 | 2 | 3 | 2 | 3 | 153 | 27 | 56 | 20 | 44 | 70 | 32 | 12 | 29 | 12 | 2 | 3 | 14 |
5,647 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerIntermediate |
import torch
import torch.nn.functional
from .configuration_timesformer import TimesformerConfig
from ...activations import ACT2FN
from torch import nn
class TimesformerIntermediate(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class TimesformerIntermediate(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 2 | 0 | 1 | 4 | 1 | 0 | 2 | 3 | 2 | 12 | 17 | 3 | 14 | 6 | 11 | 0 | 13 | 6 | 10 | 2 | 1 | 1 | 3 |
5,648 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerLayer |
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
import torch.nn.functional
import torch
from .configuration_timesformer import TimesformerConfig
class TimesformerLayer(GradientCheckpointingLayer):
def __init__(self, config: TimesformerConfig, layer_index: int) -> None:
super().__init__()
attention_type = config.attention_type
drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device='cpu')]
drop_path_rate = drop_path_rates[layer_index]
self.drop_path = TimeSformerDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.attention = TimeSformerAttention(config)
self.intermediate = TimesformerIntermediate(config)
self.output = TimesformerOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.config = config
self.attention_type = attention_type
if attention_type not in ['divided_space_time', 'space_only', 'joint_space_time']:
raise ValueError(f'Unknown attention type: {attention_type}')
if self.attention_type == 'divided_space_time':
self.temporal_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.temporal_attention = TimeSformerAttention(config)
self.temporal_dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False):
num_frames = self.config.num_frames
num_patch_width = self.config.image_size // self.config.patch_size
batch_size = hidden_states.shape[0]
num_spatial_tokens = (hidden_states.size(1) - 1) // num_frames
num_patch_height = num_spatial_tokens // num_patch_width
if self.attention_type in ['space_only', 'joint_space_time']:
self_attention_outputs = self.attention(self.layernorm_before(hidden_states), output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
hidden_states = hidden_states + self.drop_path(attention_output)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(layer_output)
outputs = (layer_output,) + outputs
return outputs
elif self.attention_type == 'divided_space_time':
temporal_embedding = hidden_states[:, 1:, :]
temporal_embedding = temporal_embedding.reshape(batch_size, num_patch_height, num_patch_width, num_frames, temporal_embedding.shape[2]).reshape(batch_size * num_patch_height * num_patch_width, num_frames, temporal_embedding.shape[2])
temporal_attention_outputs = self.temporal_attention(self.temporal_layernorm(temporal_embedding))
attention_output = temporal_attention_outputs[0]
residual_temporal = self.drop_path(attention_output)
residual_temporal = residual_temporal.reshape(batch_size, num_patch_height, num_patch_width, num_frames, residual_temporal.shape[2]).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_temporal.shape[2])
residual_temporal = self.temporal_dense(residual_temporal)
temporal_embedding = hidden_states[:, 1:, :] + residual_temporal
init_cls_token = hidden_states[:, 0, :].unsqueeze(1)
cls_token = init_cls_token.repeat(1, num_frames, 1)
cls_token = cls_token.reshape(batch_size * num_frames, 1, cls_token.shape[2])
spatial_embedding = temporal_embedding
spatial_embedding = spatial_embedding.reshape(batch_size, num_patch_height, num_patch_width, num_frames, spatial_embedding.shape[2]).permute(0, 3, 1, 2, 4).reshape(batch_size * num_frames, num_patch_height * num_patch_width, spatial_embedding.shape[2])
spatial_embedding = torch.cat((cls_token, spatial_embedding), 1)
spatial_attention_outputs = self.attention(self.layernorm_before(spatial_embedding), output_attentions=output_attentions)
attention_output = spatial_attention_outputs[0]
outputs = spatial_attention_outputs[1:]
residual_spatial = self.drop_path(attention_output)
cls_token = residual_spatial[:, 0, :]
cls_token = cls_token.reshape(batch_size, num_frames, cls_token.shape[1])
cls_token = torch.mean(cls_token, 1, True)
residual_spatial = residual_spatial[:, 1:, :]
residual_spatial = residual_spatial.reshape(batch_size, num_frames, num_patch_height, num_patch_width, residual_spatial.shape[2]).permute(0, 2, 3, 1, 4).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_spatial.shape[2])
residual = residual_spatial
hidden_states = temporal_embedding
hidden_states = torch.cat((init_cls_token, hidden_states), 1) + torch.cat((cls_token, residual), 1)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(layer_output)
outputs = (layer_output,) + outputs
return outputs
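For the `"divided_space_time"` branch, the reshapes above are easiest to follow as a shape trace. The trace below assumes the default configuration (batch size 1, 8 frames, a 14x14 patch grid, hidden size 768) and only restates the arithmetic implied by the code, not any additional behaviour.
```python
# Shape trace for the divided space-time path (assumed default configuration):
#   hidden_states:      (1, 1 + 8*196, 768) = (1, 1569, 768)
#   temporal_embedding: (1*14*14, 8, 768)   -> temporal attention mixes the 8 frames seen
#                                              at each spatial location
#   spatial_embedding:  (1*8, 1 + 196, 768) -> spatial attention mixes the 196 patches
#                                              (plus a repeated [CLS] token) within each frame
#   layer_output:       (1, 1569, 768)      -> same shape as the input, so layers can be stacked
```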
|
class TimesformerLayer(GradientCheckpointingLayer):
def __init__(self, config: TimesformerConfig, layer_index: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False):
pass
| 3 | 0 | 60 | 11 | 47 | 5 | 4 | 0.1 | 1 | 10 | 5 | 0 | 2 | 11 | 2 | 12 | 121 | 22 | 94 | 35 | 91 | 9 | 69 | 35 | 66 | 4 | 1 | 1 | 7 |
5,649 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerModel |
from torch import nn
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
import torch.nn.functional
import torch
from typing import Optional, Union
@auto_docstring
class TimesformerModel(TimesformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = TimesformerEmbeddings(config)
self.encoder = TimesformerEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]:
"""
Examples:
```python
>>> import av
>>> import numpy as np
>>> from transformers import AutoImageProcessor, TimesformerModel
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
>>> model = TimesformerModel.from_pretrained("facebook/timesformer-base-finetuned-k400")
>>> # prepare video for the model
>>> inputs = image_processor(list(video), return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 1569, 768]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
if self.layernorm is not None:
sequence_output = self.layernorm(sequence_output)
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class TimesformerModel(TimesformerPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]:
'''
Examples:
```python
>>> import av
>>> import numpy as np
>>> from transformers import AutoImageProcessor, TimesformerModel
>>> from huggingface_hub import hf_hub_download
>>> np.random.seed(0)
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
... '''
... Sample a given number of frame indices from the video.
... Args:
... clip_len (`int`): Total number of frames to sample.
... frame_sample_rate (`int`): Sample every n-th frame.
... seg_len (`int`): Maximum allowed index of sample's last frame.
... Returns:
... indices (`list[int]`): List of sampled frame indices
... '''
... converted_len = int(clip_len * frame_sample_rate)
... end_idx = np.random.randint(converted_len, seg_len)
... start_idx = end_idx - converted_len
... indices = np.linspace(start_idx, end_idx, num=clip_len)
... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
... return indices
>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample 8 frames
>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)
>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
>>> model = TimesformerModel.from_pretrained("facebook/timesformer-base-finetuned-k400")
>>> # prepare video for the model
>>> inputs = image_processor(list(video), return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 1569, 768]
```'''
pass
| 7 | 2 | 32 | 5 | 10 | 17 | 3 | 1.52 | 1 | 5 | 3 | 0 | 4 | 4 | 4 | 5 | 135 | 24 | 44 | 20 | 31 | 67 | 25 | 13 | 20 | 6 | 2 | 1 | 10 |
5,650 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerOutput |
from .configuration_timesformer import TimesformerConfig
import torch
from torch import nn
import torch.nn.functional
class TimesformerOutput(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class TimesformerOutput(nn.Module):
def __init__(self, config: TimesformerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 12 | 11 | 2 | 9 | 5 | 6 | 0 | 9 | 5 | 6 | 1 | 1 | 0 | 2 |
5,651 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerPatchEmbeddings |
from torch import nn
import collections
class TimesformerPatchEmbeddings(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, config):
super().__init__()
image_size = config.image_size
patch_size = config.patch_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values):
batch_size, num_frames, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width)
embeddings = self.projection(pixel_values)
patch_width = embeddings.size(-1)
embeddings = embeddings.flatten(2).transpose(1, 2)
return (embeddings, num_frames, patch_width)
|
class TimesformerPatchEmbeddings(nn.Module):
'''Image to Patch Embedding'''
def __init__(self, config):
pass
def forward(self, pixel_values):
pass
| 3 | 1 | 12 | 3 | 9 | 0 | 2 | 0.05 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 27 | 7 | 19 | 13 | 16 | 1 | 19 | 13 | 16 | 3 | 1 | 0 | 4 |
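A minimal standalone sketch of the patch-embedding arithmetic in the class above; the 224-pixel frames, 16-pixel patches, 8-frame clip, and 768-dim hidden size are assumed values for illustration, not taken from the record:

```python
import torch
from torch import nn

# Assumed sizes for illustration only: 224x224 frames, 16x16 patches, hidden size 768.
image_size, patch_size, num_channels, hidden_size = 224, 16, 3, 768
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches per frame

projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

# (batch, num_frames, channels, height, width) with frames folded into the batch dimension,
# mirroring the reshape in the forward pass above.
pixel_values = torch.randn(2, 8, num_channels, image_size, image_size)
batch_size, num_frames = pixel_values.shape[:2]
flat = pixel_values.reshape(batch_size * num_frames, num_channels, image_size, image_size)
embeddings = projection(flat).flatten(2).transpose(1, 2)
print(embeddings.shape)  # torch.Size([16, 196, 768]): 196 patch tokens per frame slice
```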
| 5,652 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerPreTrainedModel |
from torch import nn
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_timesformer import TimesformerConfig
@auto_docstring
class TimesformerPreTrainedModel(PreTrainedModel):
config: TimesformerConfig
base_model_prefix = 'timesformer'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['TimesformerLayer']
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.trunc_normal_(module.weight, std=self.config.initializer_range)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.bias, 0)
nn.init.constant_(module.weight, 1.0)
elif isinstance(module, TimesformerEmbeddings):
nn.init.trunc_normal_(module.cls_token, std=self.config.initializer_range)
nn.init.trunc_normal_(module.position_embeddings, std=self.config.initializer_range)
module.patch_embeddings.apply(self._init_weights)
|
@auto_docstring
class TimesformerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3 | 0 | 12 | 0 | 12 | 0 | 5 | 0.22 | 1 | 1 | 1 | 2 | 1 | 0 | 1 | 1 | 24 | 2 | 18 | 7 | 16 | 4 | 16 | 7 | 14 | 5 | 1 | 2 | 5 |
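For reference, a minimal hedged sketch of the truncated-normal initialization rule applied by `_init_weights` above; the 0.02 standard deviation stands in for `config.initializer_range` and is an assumption:

```python
from torch import nn

# Assumed initializer_range of 0.02: Linear/Conv2d weights get a truncated normal, biases are zeroed.
linear = nn.Linear(768, 768)
nn.init.trunc_normal_(linear.weight, std=0.02)
if linear.bias is not None:
    nn.init.constant_(linear.bias, 0)
print(round(linear.weight.std().item(), 3))  # close to 0.02
```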
| 5,653 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerSelfAttention |
from torch import nn
from .configuration_timesformer import TimesformerConfig
class TimesformerSelfAttention(nn.Module):
def __init__(self, config: TimesformerConfig):
super().__init__()
num_heads = config.num_attention_heads
qkv_bias = config.qkv_bias
attention_dropout_prob = config.attention_probs_dropout_prob
self.num_heads = num_heads
head_dim = config.hidden_size // num_heads
self.scale = head_dim ** (-0.5)
self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attention_dropout_prob)
def forward(self, hidden_states, output_attentions: bool=False):
batch_size, hidden_size, num_channels = hidden_states.shape
qkv = self.qkv(hidden_states).reshape(batch_size, hidden_size, 3, self.num_heads, num_channels // self.num_heads).permute(2, 0, 3, 1, 4)
query, key, value = (qkv[0], qkv[1], qkv[2])
attention_probs = query @ key.transpose(-2, -1) * self.scale
attention_probs = attention_probs.softmax(dim=-1)
attention_probs = self.attn_drop(attention_probs)
context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, hidden_size, num_channels)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class TimesformerSelfAttention(nn.Module):
def __init__(self, config: TimesformerConfig):
pass
def forward(self, hidden_states, output_attentions: bool=False):
pass
| 3 | 0 | 15 | 3 | 12 | 0 | 2 | 0 | 1 | 3 | 1 | 0 | 2 | 4 | 2 | 12 | 32 | 7 | 25 | 17 | 22 | 0 | 21 | 17 | 18 | 2 | 1 | 0 | 3 |
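A standalone shape walk-through of the fused qkv projection used in the attention forward above; the batch size, 197-token sequence, 12 heads, and 768-dim hidden size are assumed for illustration:

```python
import torch
from torch import nn

# Assumed sizes: batch 2, 197 tokens, hidden size 768, 12 heads (head_dim 64).
batch_size, seq_len, hidden_size, num_heads = 2, 197, 768, 12
head_dim = hidden_size // num_heads
qkv = nn.Linear(hidden_size, hidden_size * 3)

hidden_states = torch.randn(batch_size, seq_len, hidden_size)
# Same reshape/permute as the forward above: -> (3, batch, heads, seq, head_dim).
query, key, value = (
    qkv(hidden_states)
    .reshape(batch_size, seq_len, 3, num_heads, head_dim)
    .permute(2, 0, 3, 1, 4)
)
attention_probs = (query @ key.transpose(-2, -1) * head_dim**-0.5).softmax(dim=-1)
context = (attention_probs @ value).transpose(1, 2).reshape(batch_size, seq_len, hidden_size)
print(attention_probs.shape, context.shape)  # (2, 12, 197, 197) (2, 197, 768)
```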
| 5,654 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timesformer/modeling_timesformer.py | transformers.models.timesformer.modeling_timesformer.TimesformerSelfOutput |
import torch
from torch import nn
from .configuration_timesformer import TimesformerConfig
import torch.nn.functional
class TimesformerSelfOutput(nn.Module):
"""
The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to
the layernorm applied before each block.
"""
def __init__(self, config: TimesformerConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class TimesformerSelfOutput(nn.Module):
'''
The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to
the layernorm applied before each block.
'''
def __init__(self, config: TimesformerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 5 | 1 | 4 | 0 | 1 | 0.44 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 12 | 16 | 3 | 9 | 5 | 6 | 4 | 9 | 5 | 6 | 1 | 1 | 0 | 2 |
| 5,655 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_backbone/configuration_timm_backbone.py | transformers.models.timm_backbone.configuration_timm_backbone.TimmBackboneConfig |
from ...configuration_utils import PretrainedConfig
class TimmBackboneConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].
It is used to instantiate a timm backbone model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone (`str`, *optional*):
The timm checkpoint to load.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
features_only (`bool`, *optional*, defaults to `True`):
Whether to output only the features or also the logits.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use a pretrained backbone.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). Will default to the last stage if unset.
freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.
Example:
```python
>>> from transformers import TimmBackboneConfig, TimmBackbone
>>> # Initializing a timm backbone
>>> configuration = TimmBackboneConfig("resnet50")
>>> # Initializing a model from the configuration
>>> model = TimmBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'timm_backbone'
def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs):
super().__init__(**kwargs)
self.backbone = backbone
self.num_channels = num_channels
self.features_only = features_only
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = True
self.out_indices = out_indices if out_indices is not None else [-1]
self.freeze_batch_norm_2d = freeze_batch_norm_2d
|
class TimmBackboneConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].
It is used to instantiate a timm backbone model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone (`str`, *optional*):
The timm checkpoint to load.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
features_only (`bool`, *optional*, defaults to `True`):
Whether to output only the features or also the logits.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use a pretrained backbone.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). Will default to the last stage if unset.
freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.
Example:
```python
>>> from transformers import TimmBackboneConfig, TimmBackbone
>>> # Initializing a timm backbone
>>> configuration = TimmBackboneConfig("resnet50")
>>> # Initializing a model from the configuration
>>> model = TimmBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs):
pass
| 2 | 1 | 18 | 0 | 18 | 0 | 2 | 1.5 | 1 | 1 | 0 | 0 | 1 | 7 | 1 | 1 | 59 | 9 | 20 | 19 | 9 | 30 | 11 | 10 | 9 | 2 | 1 | 0 | 2 |
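As a small complement to the example in the docstring above, a hedged sketch of selecting intermediate feature stages via `out_indices`; the stage indices chosen here are arbitrary:

```python
from transformers import TimmBackboneConfig

# out_indices picks which feature stages the backbone returns; it defaults to [-1] (last stage only).
config = TimmBackboneConfig(
    backbone="resnet50",
    out_indices=[1, 2, 3, 4],
    freeze_batch_norm_2d=True,
)
print(config.out_indices, config.use_timm_backbone)  # [1, 2, 3, 4] True
```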
| 5,656 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_backbone/modeling_timm_backbone.py | transformers.models.timm_backbone.modeling_timm_backbone.TimmBackbone |
import torch
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...modeling_utils import PreTrainedModel
from ...modeling_outputs import BackboneOutput
from typing import Optional, Union
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
class TimmBackbone(PreTrainedModel, BackboneMixin):
"""
Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the
other models in the library keeping the same API.
"""
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
config: TimmBackboneConfig
def __init__(self, config, **kwargs):
requires_backends(self, 'timm')
super().__init__(config)
self.config = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
if hasattr(config, 'out_features') and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
pretrained = getattr(config, 'use_pretrained_backbone', None)
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')
out_indices = config.out_indices if getattr(config, 'out_indices', None) is not None else (-1,)
in_chans = kwargs.pop('in_chans', config.num_channels)
self._backbone = timm.create_model(config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=in_chans, out_indices=out_indices, **kwargs)
if getattr(config, 'freeze_batch_norm_2d', False):
self.freeze_batch_norm_2d()
self._return_layers = {layer['module']: str(layer['index']) for layer in self._backbone.feature_info.get_dicts()}
self._all_layers = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(config)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
requires_backends(cls, ['vision', 'timm'])
from ...models.timm_backbone import TimmBackboneConfig
config = kwargs.pop('config', TimmBackboneConfig())
use_timm = kwargs.pop('use_timm_backbone', True)
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones')
num_channels = kwargs.pop('num_channels', config.num_channels)
features_only = kwargs.pop('features_only', config.features_only)
use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
out_indices = kwargs.pop('out_indices', config.out_indices)
config = TimmBackboneConfig(backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices)
return super()._from_config(config, **kwargs)
def freeze_batch_norm_2d(self):
timm.utils.model.freeze_batch_norm_2d(self._backbone)
def unfreeze_batch_norm_2d(self):
timm.utils.model.unfreeze_batch_norm_2d(self._backbone)
def _init_weights(self, module):
"""
Empty init weights function to ensure compatibility of the class in the library.
"""
pass
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[BackboneOutput, tuple[Tensor, ...]]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment')
if output_hidden_states:
self._backbone.return_layers = self._all_layers
hidden_states = self._backbone(pixel_values, **kwargs)
self._backbone.return_layers = self._return_layers
feature_maps = tuple((hidden_states[i] for i in self.out_indices))
else:
feature_maps = self._backbone(pixel_values, **kwargs)
hidden_states = None
feature_maps = tuple(feature_maps)
hidden_states = tuple(hidden_states) if hidden_states is not None else None
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
|
class TimmBackbone(PreTrainedModel, BackboneMixin):
'''
Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the
other models in the library keeping the same API.
'''
def __init__(self, config, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
pass
def freeze_batch_norm_2d(self):
pass
def unfreeze_batch_norm_2d(self):
pass
def _init_weights(self, module):
'''
Empty init weights function to ensure compatibility of the class in the library.
'''
pass
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[BackboneOutput, tuple[Tensor, ...]]:
pass
| 8 | 2 | 18 | 3 | 14 | 2 | 3 | 0.15 | 2 | 9 | 2 | 0 | 5 | 4 | 6 | 18 | 124 | 22 | 89 | 35 | 73 | 13 | 63 | 27 | 55 | 9 | 1 | 2 | 20 |
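A hedged end-to-end sketch of the forward pass above; it assumes `timm` is installed, uses randomly initialized weights (`use_pretrained_backbone=False`), and the resnet18 stage indices are an assumption:

```python
import torch
from transformers import TimmBackbone, TimmBackboneConfig

# Randomly initialized resnet18 backbone returning two intermediate stages (assumed indices).
config = TimmBackboneConfig(backbone="resnet18", out_indices=[2, 4], use_pretrained_backbone=False)
model = TimmBackbone(config).eval()

with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224))

# One feature map per requested stage index.
print([tuple(fm.shape) for fm in outputs.feature_maps])
```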
| 5,657 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_wrapper/configuration_timm_wrapper.py | transformers.models.timm_wrapper.configuration_timm_wrapper.TimmWrapperConfig |
from ...utils import is_timm_available, logging, requires_backends
from ...configuration_utils import PretrainedConfig
from typing import Any, Optional
class TimmWrapperConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`].
It is used to instantiate a timm model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Config loads imagenet label descriptions and stores them in `id2label` attribute, `label2id` attribute for default
imagenet models is set to `None` due to occlusions in the label descriptions.
Args:
architecture (`str`, *optional*, defaults to `"resnet50"`):
The timm architecture to load.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
do_pooling (`bool`, *optional*, defaults to `True`):
Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not.
model_args (`dict[str, Any]`, *optional*):
Additional keyword arguments to pass to the `timm.create_model` function. e.g. `model_args={"depth": 3}`
for `timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k` to create a model with 3 blocks. Defaults to `None`.
Example:
```python
>>> from transformers import TimmWrapperModel
>>> # Initializing a timm model
>>> model = TimmWrapperModel.from_pretrained("timm/resnet18.a1_in1k")
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'timm_wrapper'
def __init__(self, architecture: str='resnet50', initializer_range: float=0.02, do_pooling: bool=True, model_args: Optional[dict[str, Any]]=None, **kwargs):
self.architecture = architecture
self.initializer_range = initializer_range
self.do_pooling = do_pooling
self.model_args = model_args
super().__init__(**kwargs)
@classmethod
def from_dict(cls, config_dict: dict[str, Any], **kwargs):
label_names = config_dict.get('label_names')
is_custom_model = 'num_labels' in kwargs or 'id2label' in kwargs
if label_names is None and (not is_custom_model):
requires_backends(cls, ['timm'])
imagenet_subset = infer_imagenet_subset(config_dict)
if imagenet_subset:
dataset_info = ImageNetInfo(imagenet_subset)
synsets = dataset_info.label_names()
label_descriptions = dataset_info.label_descriptions(as_dict=True)
label_names = [label_descriptions[synset] for synset in synsets]
if label_names is not None and (not is_custom_model):
kwargs['id2label'] = dict(enumerate(label_names))
if len(set(label_names)) == len(label_names):
kwargs['label2id'] = {name: i for i, name in enumerate(label_names)}
else:
kwargs['label2id'] = None
num_labels_in_kwargs = kwargs.pop('num_labels', None)
num_labels_in_dict = config_dict.pop('num_classes', None)
kwargs['num_labels'] = num_labels_in_kwargs or num_labels_in_dict
if 'pretrained_cfg' in config_dict and 'num_classes' in config_dict['pretrained_cfg']:
config_dict['pretrained_cfg'].pop('num_classes', None)
return super().from_dict(config_dict, **kwargs)
def to_dict(self) -> dict[str, Any]:
output = super().to_dict()
output['num_classes'] = self.num_labels
output['label_names'] = list(self.id2label.values())
output.pop('id2label', None)
output.pop('label2id', None)
return output
|
class TimmWrapperConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`].
It is used to instantiate a timm model according to the specified arguments, defining the model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Config loads imagenet label descriptions and stores them in `id2label` attribute, `label2id` attribute for default
imagenet models is set to `None` due to occlusions in the label descriptions.
Args:
architecture (`str`, *optional*, defaults to `"resnet50"`):
The timm architecture to load.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
do_pooling (`bool`, *optional*, defaults to `True`):
Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not.
model_args (`dict[str, Any]`, *optional*):
Additional keyword arguments to pass to the `timm.create_model` function. e.g. `model_args={"depth": 3}`
for `timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k` to create a model with 3 blocks. Defaults to `None`.
Example:
```python
>>> from transformers import TimmWrapperModel
>>> # Initializing a timm model
>>> model = TimmWrapperModel.from_pretrained("timm/resnet18.a1_in1k")
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, architecture: str='resnet50', initializer_range: float=0.02, do_pooling: bool=True, model_args: Optional[dict[str, Any]]=None, **kwargs):
pass
@classmethod
def from_dict(cls, config_dict: dict[str, Any], **kwargs):
pass
def to_dict(self) -> dict[str, Any]:
pass
| 5 | 1 | 16 | 2 | 11 | 3 | 3 | 0.78 | 1 | 9 | 0 | 0 | 2 | 2 | 3 | 3 | 84 | 18 | 37 | 17 | 32 | 29 | 35 | 16 | 31 | 6 | 1 | 2 | 8 |
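A standalone illustration of the label-mapping rule in `from_dict` above: `id2label` is always built from the label names, while `label2id` is only built when the names are unique (otherwise it is set to `None`). The label names below are hypothetical:

```python
# Hypothetical label names with a duplicate, to show both branches of the rule.
label_names = ["tabby cat", "tiger cat", "tabby cat"]

id2label = dict(enumerate(label_names))
label2id = (
    {name: i for i, name in enumerate(label_names)}
    if len(set(label_names)) == len(label_names)
    else None
)
print(id2label)  # {0: 'tabby cat', 1: 'tiger cat', 2: 'tabby cat'}
print(label2id)  # None, because the names collide
```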
| 5,658 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_wrapper/image_processing_timm_wrapper.py | transformers.models.timm_wrapper.image_processing_timm_wrapper.TimmWrapperImageProcessor |
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...utils.import_utils import is_timm_available, is_torch_available, requires
import os
from typing import Any, Optional, Union
from ...image_utils import ImageInput, make_flat_list_of_images
from ...utils import TensorType, logging, requires_backends
from ...image_transforms import to_pil_image
import torch
@requires(backends=('torch', 'timm', 'torchvision'))
class TimmWrapperImageProcessor(BaseImageProcessor):
"""
Wrapper class for timm models to be used within transformers.
Args:
pretrained_cfg (`dict[str, Any]`):
The configuration of the pretrained model used to resolve evaluation and
training transforms.
architecture (`Optional[str]`, *optional*):
Name of the architecture of the model.
"""
main_input_name = 'pixel_values'
def __init__(self, pretrained_cfg: dict[str, Any], architecture: Optional[str]=None, **kwargs):
requires_backends(self, 'timm')
super().__init__(architecture=architecture)
self.data_config = timm.data.resolve_data_config(pretrained_cfg, model=None, verbose=False)
self.val_transforms = timm.data.create_transform(**self.data_config, is_training=False)
self.train_transforms = timm.data.create_transform(**self.data_config, is_training=True)
self._not_supports_tensor_input = any((transform.__class__.__name__ == 'ToTensor' for transform in self.val_transforms.transforms))
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
"""
output = super().to_dict()
output.pop('train_transforms', None)
output.pop('val_transforms', None)
output.pop('_not_supports_tensor_input', None)
return output
@classmethod
def get_image_processor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
"""
Get the image processor dict for the model.
"""
image_processor_filename = kwargs.pop('image_processor_filename', 'config.json')
return super().get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs)
def preprocess(self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]]='pt') -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
"""
if return_tensors != 'pt':
raise ValueError(f"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}")
if self._not_supports_tensor_input and isinstance(images, torch.Tensor):
images = images.cpu().numpy()
if isinstance(images, torch.Tensor):
images = self.val_transforms(images)
images = images.unsqueeze(0) if images.ndim == 3 else images
else:
images = make_flat_list_of_images(images)
images = [to_pil_image(image) for image in images]
images = torch.stack([self.val_transforms(image) for image in images])
return BatchFeature({'pixel_values': images}, tensor_type=return_tensors)
def save_pretrained(self, *args, **kwargs):
logger.warning_once('The `save_pretrained` method is disabled for TimmWrapperImageProcessor. The image processor configuration is saved directly in `config.json` when `save_pretrained` is called for saving the model.')
|
@requires(backends=('torch', 'timm', 'torchvision'))
class TimmWrapperImageProcessor(BaseImageProcessor):
'''
Wrapper class for timm models to be used within transformers.
Args:
pretrained_cfg (`dict[str, Any]`):
The configuration of the pretrained model used to resolve evaluation and
training transforms.
architecture (`Optional[str]`, *optional*):
Name of the architecture of the model.
'''
def __init__(self, pretrained_cfg: dict[str, Any], architecture: Optional[str]=None, **kwargs):
pass
def to_dict(self) -> dict[str, Any]:
'''
Serializes this instance to a Python dictionary.
'''
pass
@classmethod
def get_image_processor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
'''
Get the image processor dict for the model.
'''
pass
def preprocess(self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]]='pt') -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
'''
pass
def save_pretrained(self, *args, **kwargs):
pass
| 8 | 4 | 16 | 1 | 10 | 4 | 2 | 0.58 | 1 | 8 | 2 | 0 | 4 | 4 | 5 | 25 | 98 | 14 | 53 | 25 | 35 | 31 | 32 | 13 | 26 | 5 | 3 | 1 | 9 |
| 5,659 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py | transformers.models.timm_wrapper.modeling_timm_wrapper.TimmWrapperForImageClassification |
from typing import Optional, Union
import torch
from .configuration_timm_wrapper import TimmWrapperConfig
from ...utils import auto_docstring, is_timm_available, requires_backends
from ...modeling_outputs import ImageClassifierOutput, ModelOutput
from torch import Tensor, nn
class TimmWrapperForImageClassification(TimmWrapperPreTrainedModel):
"""
Wrapper class for timm models to be used in transformers for image classification.
"""
def __init__(self, config: TimmWrapperConfig):
super().__init__(config)
if config.num_labels == 0:
raise ValueError('You are trying to load weights into `TimmWrapperForImageClassification` from a checkpoint with no classifier head. Please specify the number of classes, e.g. `model = TimmWrapperForImageClassification.from_pretrained(..., num_labels=10)`, or use `TimmWrapperModel` for feature extraction.')
extra_init_kwargs = config.model_args or {}
self.timm_model = _create_timm_model_with_error_handling(config, num_classes=config.num_labels, **extra_init_kwargs)
self.num_labels = config.num_labels
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[Union[bool, list[int]]]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[ImageClassifierOutput, tuple[Tensor, ...]]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. Not compatible with timm wrapped models.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Not compatible with timm wrapped models.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
**kwargs:
Additional keyword arguments passed along to the `timm` model forward.
Examples:
```python
>>> import torch
>>> from PIL import Image
>>> from urllib.request import urlopen
>>> from transformers import AutoModelForImageClassification, AutoImageProcessor
>>> # Load image
>>> image = Image.open(urlopen(
... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
... ))
>>> # Load model and image processor
>>> checkpoint = "timm/resnet50.a1_in1k"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
>>> model = AutoModelForImageClassification.from_pretrained(checkpoint).eval()
>>> # Preprocess image
>>> inputs = image_processor(image)
>>> # Forward pass
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # Get top 5 predictions
>>> top5_probabilities, top5_class_indices = torch.topk(logits.softmax(dim=1) * 100, k=5)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot set `output_attentions` for timm models.')
if output_hidden_states and (not hasattr(self.timm_model, 'forward_intermediates')):
raise ValueError("The 'output_hidden_states' option cannot be set for this timm model. To enable this feature, the 'forward_intermediates' method must be implemented in the timm model (available in timm versions > 1.*). Please consider using a different architecture or updating the timm package to a compatible version.")
pixel_values = pixel_values.to(self.device, self.dtype)
if output_hidden_states:
if isinstance(output_hidden_states, (list, tuple)):
kwargs['indices'] = output_hidden_states
last_hidden_state, hidden_states = self.timm_model.forward_intermediates(pixel_values, **kwargs)
logits = self.timm_model.forward_head(last_hidden_state)
else:
logits = self.timm_model(pixel_values, **kwargs)
hidden_states = None
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
outputs = (loss, logits, hidden_states)
outputs = tuple((output for output in outputs if output is not None))
return outputs
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states)
|
class TimmWrapperForImageClassification(TimmWrapperPreTrainedModel):
'''
Wrapper class for timm models to be used in transformers for image classification.
'''
def __init__(self, config: TimmWrapperConfig):
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[Union[bool, list[int]]]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[ImageClassifierOutput, tuple[Tensor, ...]]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. Not compatible with timm wrapped models.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Not compatible with timm wrapped models.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
**kwargs:
Additional keyword arguments passed along to the `timm` model forward.
Examples:
```python
>>> import torch
>>> from PIL import Image
>>> from urllib.request import urlopen
>>> from transformers import AutoModelForImageClassification, AutoImageProcessor
>>> # Load image
>>> image = Image.open(urlopen(
... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
... ))
>>> # Load model and image processor
>>> checkpoint = "timm/resnet50.a1_in1k"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
>>> model = AutoModelForImageClassification.from_pretrained(checkpoint).eval()
>>> # Preprocess image
>>> inputs = image_processor(image)
>>> # Forward pass
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # Get top 5 predictions
>>> top5_probabilities, top5_class_indices = torch.topk(logits.softmax(dim=1) * 100, k=5)
```
'''
pass
| 4 | 2 | 60 | 8 | 37 | 15 | 10 | 0.43 | 1 | 9 | 2 | 0 | 2 | 2 | 2 | 7 | 127 | 18 | 76 | 19 | 63 | 33 | 45 | 10 | 42 | 17 | 2 | 3 | 19 |
| 5,660 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py | transformers.models.timm_wrapper.modeling_timm_wrapper.TimmWrapperModel |
from typing import Optional, Union
import torch
from torch import Tensor, nn
from .configuration_timm_wrapper import TimmWrapperConfig
from ...utils import auto_docstring, is_timm_available, requires_backends
class TimmWrapperModel(TimmWrapperPreTrainedModel):
"""
Wrapper class for timm models to be used in transformers.
"""
def __init__(self, config: TimmWrapperConfig):
super().__init__(config)
extra_init_kwargs = config.model_args or {}
self.timm_model = _create_timm_model_with_error_handling(config, num_classes=0, **extra_init_kwargs)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[Union[bool, list[int]]]=None, return_dict: Optional[bool]=None, do_pooling: Optional[bool]=None, **kwargs) -> Union[TimmWrapperModelOutput, tuple[Tensor, ...]]:
"""
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. Not compatible with timm wrapped models.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Not compatible with timm wrapped models.
do_pooling (`bool`, *optional*):
Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not. If `None` is passed, the
`do_pooling` value from the config is used.
Examples:
```python
>>> import torch
>>> from PIL import Image
>>> from urllib.request import urlopen
>>> from transformers import AutoModel, AutoImageProcessor
>>> # Load image
>>> image = Image.open(urlopen(
... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
... ))
>>> # Load model and image processor
>>> checkpoint = "timm/resnet50.a1_in1k"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
>>> model = AutoModel.from_pretrained(checkpoint).eval()
>>> # Preprocess image
>>> inputs = image_processor(image)
>>> # Forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Get pooled output
>>> pooled_output = outputs.pooler_output
>>> # Get last hidden state
>>> last_hidden_state = outputs.last_hidden_state
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
do_pooling = do_pooling if do_pooling is not None else self.config.do_pooling
if output_attentions:
raise ValueError('Cannot set `output_attentions` for timm models.')
if output_hidden_states and (not hasattr(self.timm_model, 'forward_intermediates')):
raise ValueError("The 'output_hidden_states' option cannot be set for this timm model. To enable this feature, the 'forward_intermediates' method must be implemented in the timm model (available in timm versions > 1.*). Please consider using a different architecture or updating the timm package to a compatible version.")
pixel_values = pixel_values.to(self.device, self.dtype)
if output_hidden_states:
if isinstance(output_hidden_states, (list, tuple)):
kwargs['indices'] = output_hidden_states
last_hidden_state, hidden_states = self.timm_model.forward_intermediates(pixel_values, **kwargs)
else:
last_hidden_state = self.timm_model.forward_features(pixel_values, **kwargs)
hidden_states = None
if do_pooling:
pooler_output = self.timm_model.forward_head(last_hidden_state)
else:
pooler_output = None
if not return_dict:
outputs = (last_hidden_state, pooler_output, hidden_states)
outputs = tuple((output for output in outputs if output is not None))
return outputs
return TimmWrapperModelOutput(last_hidden_state=last_hidden_state, pooler_output=pooler_output, hidden_states=hidden_states)
|
class TimmWrapperModel(TimmWrapperPreTrainedModel):
'''
Wrapper class for timm models to be used in transformers.
'''
def __init__(self, config: TimmWrapperConfig):
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[Union[bool, list[int]]]=None, return_dict: Optional[bool]=None, do_pooling: Optional[bool]=None, **kwargs) -> Union[TimmWrapperModelOutput, tuple[Tensor, ...]]:
'''
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. Not compatible with timm wrapped models.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Not compatible with timm wrapped models.
do_pooling (`bool`, *optional*):
Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not. If `None` is passed, the
`do_pooling` value from the config is used.
Examples:
```python
>>> import torch
>>> from PIL import Image
>>> from urllib.request import urlopen
>>> from transformers import AutoModel, AutoImageProcessor
>>> # Load image
>>> image = Image.open(urlopen(
... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
... ))
>>> # Load model and image processor
>>> checkpoint = "timm/resnet50.a1_in1k"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
>>> model = AutoModel.from_pretrained(checkpoint).eval()
>>> # Preprocess image
>>> inputs = image_processor(image)
>>> # Forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Get pooled output
>>> pooled_output = outputs.pooler_output
>>> # Get last hidden state
>>> last_hidden_state = outputs.last_hidden_state
```
'''
pass
| 4 | 2 | 49 | 8 | 25 | 17 | 6 | 0.69 | 1 | 9 | 2 | 0 | 2 | 1 | 2 | 7 | 105 | 17 | 52 | 16 | 39 | 36 | 29 | 7 | 26 | 11 | 2 | 2 | 12 |
| 5,661 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py | transformers.models.timm_wrapper.modeling_timm_wrapper.TimmWrapperPreTrainedModel |
from torch import Tensor, nn
from ...utils import auto_docstring, is_timm_available, requires_backends
from ...modeling_utils import PreTrainedModel
from .configuration_timm_wrapper import TimmWrapperConfig
@auto_docstring
class TimmWrapperPreTrainedModel(PreTrainedModel):
main_input_name = 'pixel_values'
config: TimmWrapperConfig
_no_split_modules = []
model_tags = ['timm']
accepts_loss_kwargs = False
def __init__(self, *args, **kwargs):
requires_backends(self, ['vision', 'timm'])
super().__init__(*args, **kwargs)
def post_init(self):
self.supports_gradient_checkpointing = self._timm_model_supports_gradient_checkpointing()
super().post_init()
@staticmethod
def _fix_state_dict_key_on_load(key) -> tuple[str, bool]:
"""
Overrides original method that renames `gamma` and `beta` to `weight` and `bias`.
We don't want this behavior for timm wrapped models. Instead, this method adds a
"timm_model." prefix to enable loading official timm Hub checkpoints.
"""
if 'timm_model.' not in key:
return (f'timm_model.{key}', True)
return (key, False)
def _fix_state_dict_key_on_save(self, key):
"""
Overrides original method to remove "timm_model." prefix from state_dict keys.
Makes the saved checkpoint compatible with the `timm` library.
"""
return (key.replace('timm_model.', ''), True)
def load_state_dict(self, state_dict, *args, **kwargs):
"""
Override original method to fix state_dict keys on load for cases when weights are loaded
without using the `from_pretrained` method (e.g., in Trainer to resume from checkpoint).
"""
state_dict = {self._fix_state_dict_key_on_load(k)[0]: v for k, v in state_dict.items()}
return super().load_state_dict(state_dict, *args, **kwargs)
def _init_weights(self, module):
"""
Initialize weights function to properly initialize Linear layer weights.
Since model architectures may vary, we assume only the classifier requires
initialization, while all other weights should be loaded from the checkpoint.
"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _timm_model_supports_gradient_checkpointing(self):
"""
Check if the timm model supports gradient checkpointing by checking if the `set_grad_checkpointing` method is available.
Some timm models will have the method but will raise an AssertionError when called so in this case we return False.
"""
if not hasattr(self.timm_model, 'set_grad_checkpointing'):
return False
try:
self.timm_model.set_grad_checkpointing(enable=True)
self.timm_model.set_grad_checkpointing(enable=False)
return True
except Exception:
return False
def _set_gradient_checkpointing(self, enable: bool=True, *args, **kwargs):
self.timm_model.set_grad_checkpointing(enable)
|
@auto_docstring
class TimmWrapperPreTrainedModel(PreTrainedModel):
def __init__(self, *args, **kwargs):
pass
def post_init(self):
pass
@staticmethod
def _fix_state_dict_key_on_load(key) -> tuple[str, bool]:
'''
Overrides original method that renames `gamma` and `beta` to `weight` and `bias`.
We don't want this behavior for timm wrapped models. Instead, this method adds a
"timm_model." prefix to enable loading official timm Hub checkpoints.
'''
pass
def _fix_state_dict_key_on_save(self, key):
'''
Overrides original method to remove "timm_model." prefix from state_dict keys.
Makes the saved checkpoint compatible with the `timm` library.
'''
pass
def load_state_dict(self, state_dict, *args, **kwargs):
'''
Override original method to fix state_dict keys on load for cases when weights are loaded
without using the `from_pretrained` method (e.g., in Trainer to resume from checkpoint).
'''
pass
def _init_weights(self, module):
'''
Initialize weights function to properly initialize Linear layer weights.
Since model architectures may vary, we assume only the classifier requires
initialization, while all other weights should be loaded from the checkpoint.
'''
pass
def _timm_model_supports_gradient_checkpointing(self):
'''
Check if the timm model supports gradient checkpointing by checking if the `set_grad_checkpointing` method is available.
Some timm models will have the method but will raise an AssertionError when called so in this case we return False.
'''
pass
def _set_gradient_checkpointing(self, enable: bool=True, *args, **kwargs):
pass
| 11 | 5 | 7 | 0 | 3 | 4 | 2 | 0.79 | 1 | 3 | 0 | 2 | 4 | 0 | 5 | 5 | 49 | 6 | 24 | 12 | 17 | 19 | 23 | 11 | 17 | 3 | 1 | 2 | 8 |
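A standalone restatement of the state-dict key rewriting described above: the `timm_model.` prefix is added when loading official timm checkpoints and stripped again on save. The helper names below are illustrative, not part of the public API:

```python
# Illustrative helpers mirroring _fix_state_dict_key_on_load / _fix_state_dict_key_on_save above.
def fix_key_on_load(key):
    if "timm_model." not in key:
        return f"timm_model.{key}", True
    return key, False

def fix_key_on_save(key):
    return key.replace("timm_model.", "")

print(fix_key_on_load("conv1.weight"))             # ('timm_model.conv1.weight', True)
print(fix_key_on_save("timm_model.conv1.weight"))  # conv1.weight
```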
| 5,662 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/configuration_trocr.py | transformers.models.trocr.configuration_trocr.TrOCRConfig |
from ...configuration_utils import PretrainedConfig
class TrOCRConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate an
TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the TrOCR
[microsoft/trocr-base-handwritten](https://huggingface.co/microsoft/trocr-base-handwritten) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`TrOCRForCausalLM`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`,
`"silu"` and `"gelu_new"` are supported.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
scale_embedding (`bool`, *optional*, defaults to `False`):
Whether or not to scale the word embeddings by sqrt(d_model).
use_learned_position_embeddings (`bool`, *optional*, defaults to `True`):
Whether or not to use learned position embeddings. If not, sinusoidal position embeddings will be used.
layernorm_embedding (`bool`, *optional*, defaults to `True`):
Whether or not to use a layernorm after the word + position embeddings.
Example:
```python
>>> from transformers import TrOCRConfig, TrOCRForCausalLM
>>> # Initializing a TrOCR-base style configuration
>>> configuration = TrOCRConfig()
>>> # Initializing a model (with random weights) from the TrOCR-base style configuration
>>> model = TrOCRForCausalLM(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'trocr'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers'}
def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function='gelu', max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
self.vocab_size = vocab_size
self.d_model = d_model
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.activation_function = activation_function
self.max_position_embeddings = max_position_embeddings
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.init_std = init_std
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.scale_embedding = scale_embedding
self.use_learned_position_embeddings = use_learned_position_embeddings
self.layernorm_embedding = layernorm_embedding
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
|
class TrOCRConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate an
TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the TrOCR
[microsoft/trocr-base-handwritten](https://huggingface.co/microsoft/trocr-base-handwritten) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`TrOCRForCausalLM`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`,
`"silu"` and `"gelu_new"` are supported.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
scale_embedding (`bool`, *optional*, defaults to `False`):
Whether or not to scale the word embeddings by sqrt(d_model).
use_learned_position_embeddings (`bool`, *optional*, defaults to `True`):
Whether or not to use learned position embeddings. If not, sinusoidal position embeddings will be used.
layernorm_embedding (`bool`, *optional*, defaults to `True`):
Whether or not to use a layernorm after the word + position embeddings.
Example:
```python
>>> from transformers import TrOCRConfig, TrOCRForCausalLM
>>> # Initializing a TrOCR-base style configuration
>>> configuration = TrOCRConfig()
>>> # Initializing a model (with random weights) from the TrOCR-base style configuration
>>> model = TrOCRForCausalLM(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function='gelu', max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
pass
| 2 | 1 | 48 | 1 | 47 | 0 | 1 | 0.98 | 1 | 1 | 0 | 0 | 1 | 16 | 1 | 1 | 120 | 11 | 55 | 44 | 30 | 54 | 22 | 21 | 20 | 1 | 1 | 0 | 1 |
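One arithmetic note on the `scale_embedding` flag documented above: when it is enabled, the decoder multiplies its word embeddings by sqrt(d_model), for example:

```python
import math

# scale_embedding defaults to False; with the default d_model of 1024 the scale would be sqrt(1024).
d_model, scale_embedding = 1024, True
embed_scale = math.sqrt(d_model) if scale_embedding else 1.0
print(embed_scale)  # 32.0
```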
| 5,663 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py | transformers.models.trocr.modeling_trocr.TrOCRAttention |
from typing import Optional, Union
import torch
from torch import nn
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
class TrOCRAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper."""
def __init__(self, config, embed_dim: int, num_heads: int, kdim: Optional[int]=None, vdim: Optional[int]=None, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_cross_attention: Optional[bool]=False, layer_idx: Optional[bool]=None):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if not self.head_dim * num_heads == self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
|
class TrOCRAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper.'''
def __init__(self, config, embed_dim: int, num_heads: int, kdim: Optional[int]=None, vdim: Optional[int]=None, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, is_cross_attention: Optional[bool]=False, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 47
| 6
| 35
| 7
| 6
| 0.2
| 1
| 6
| 0
| 0
| 3
| 12
| 3
| 13
| 147
| 21
| 105
| 46
| 82
| 21
| 68
| 27
| 64
| 12
| 1
| 2
| 17
|
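The attention forward above is, at bottom, two batched matrix multiplications wrapped in shape bookkeeping: project, split into heads, `bmm`, merge heads, project out. A minimal self-contained sketch of that flow with illustrative sizes (not the library code; caching, masking and head masking are omitted):
```python
import torch
from torch import nn

# Illustrative sizes only; the real module reads these from the config.
bsz, tgt_len, embed_dim, num_heads = 2, 5, 8, 2
head_dim = embed_dim // num_heads
scaling = head_dim ** -0.5

q_proj, k_proj, v_proj, out_proj = (nn.Linear(embed_dim, embed_dim) for _ in range(4))
hidden_states = torch.randn(bsz, tgt_len, embed_dim)

def split_heads(x):
    # (bsz, seq, embed_dim) -> (bsz * num_heads, seq, head_dim)
    return x.view(bsz, -1, num_heads, head_dim).transpose(1, 2).reshape(bsz * num_heads, -1, head_dim)

query = split_heads(q_proj(hidden_states) * scaling)
key = split_heads(k_proj(hidden_states))
value = split_heads(v_proj(hidden_states))

attn_weights = torch.softmax(torch.bmm(query, key.transpose(1, 2)), dim=-1)  # (bsz*heads, tgt, src)
attn_output = torch.bmm(attn_weights, value)                                 # (bsz*heads, tgt, head_dim)

# Merge the heads back and project out, mirroring the last lines of the forward.
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
assert out_proj(attn_output).shape == (bsz, tgt_len, embed_dim)
```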
5,664
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRDecoder
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from torch import nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import math
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
import torch
from .configuration_trocr import TrOCRConfig
class TrOCRDecoder(TrOCRPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TrOCRDecoderLayer`]
Args:
config: TrOCRConfig
"""
def __init__(self, config: TrOCRConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
self.embed_tokens = TrOCRScaledWordEmbedding(config.vocab_size, config.hidden_size, self.padding_idx, embed_scale=embed_scale)
if config.use_learned_position_embeddings:
self.embed_positions = TrOCRLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)
else:
self.embed_positions = TrOCRSinusoidalPositionalEmbedding(config.max_position_embeddings + self.padding_idx + 1, config.hidden_size, self.padding_idx)
if config.layernorm_embedding:
self.layernorm_embedding = nn.LayerNorm(config.hidden_size)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([TrOCRDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.gradient_checkpointing = False
self.post_init()
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_ids = input_ids.view(-1, input.shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config)
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self.config.use_learned_position_embeddings:
embed_pos = self.embed_positions(input, past_key_values_length=past_key_values_length)
else:
embed_pos = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + embed_pos
if self.layernorm_embedding is not None:
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
input_shape = input.shape
attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.')
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
|
class TrOCRDecoder(TrOCRPreTrainedModel):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TrOCRDecoderLayer`]
Args:
config: TrOCRConfig
'''
def __init__(self, config: TrOCRConfig):
pass
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 63
| 9
| 38
| 16
| 11
| 0.44
| 1
| 12
| 6
| 0
| 4
| 8
| 4
| 5
| 263
| 42
| 154
| 42
| 135
| 67
| 83
| 28
| 78
| 39
| 2
| 3
| 45
|
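One detail buried in the decoder loop above is LayerDrop: during training, each decoder layer is skipped for the whole batch with probability `config.decoder_layerdrop`. A toy sketch of just that behaviour, using `nn.Linear` stand-ins for the real `TrOCRDecoderLayer` modules:
```python
import torch
from torch import nn

layerdrop = 0.1  # plays the role of config.decoder_layerdrop
layers = nn.ModuleList(nn.Linear(8, 8) for _ in range(4))  # stand-ins for TrOCRDecoderLayer
hidden_states = torch.randn(2, 5, 8)

training = True
for layer in layers:
    # One draw per layer and per forward pass; a skipped layer contributes nothing.
    if training and torch.rand([]) < layerdrop:
        continue
    hidden_states = layer(hidden_states)
```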
5,665
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRDecoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...activations import ACT2FN
from ...utils.deprecation import deprecate_kwarg
from .configuration_trocr import TrOCRConfig
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch import nn
class TrOCRDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TrOCRConfig, layer_idx=None):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = TrOCRAttention(config, embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
if config.is_decoder:
self.encoder_attn = TrOCRAttention(config, embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, kdim=config.cross_attention_hidden_size, vdim=config.cross_attention_hidden_size, dropout=config.attention_dropout, is_decoder=True, is_cross_attention=True, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size *(decoder_attention_heads,)*.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
class TrOCRDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TrOCRConfig, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size *(decoder_attention_heads,)*.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 4
| 1
| 63
| 9
| 42
| 13
| 4
| 0.3
| 1
| 5
| 2
| 0
| 2
| 11
| 2
| 12
| 127
| 18
| 84
| 32
| 70
| 25
| 45
| 21
| 42
| 6
| 1
| 1
| 8
|
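Every sub-block of the layer above (self-attention, optional cross-attention, feed-forward) follows the same post-norm residual pattern: `out = LayerNorm(residual + Dropout(sublayer(x)))`. A minimal sketch of one such block, with a hypothetical `nn.Linear` standing in for the attention or MLP call:
```python
import torch
from torch import nn

embed_dim, dropout_p = 8, 0.1
sublayer = nn.Linear(embed_dim, embed_dim)  # stand-in for self_attn / encoder_attn / fc1+fc2
layer_norm = nn.LayerNorm(embed_dim)

x = torch.randn(2, 5, embed_dim)
residual = x
x = sublayer(x)
x = nn.functional.dropout(x, p=dropout_p, training=True)
x = layer_norm(residual + x)  # residual first, then LayerNorm, as in the layer above
```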
5,666
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRDecoderWrapper
|
from ...utils import auto_docstring, logging
@auto_docstring(custom_intro='\n The TrOCR Model with a language modeling head. Can be used for summarization.\n This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is\n used in combination with the [`EncoderDecoderModel`] framework.\n ')
class TrOCRDecoderWrapper(TrOCRPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.decoder = TrOCRDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
|
@auto_docstring(custom_intro='\n The TrOCR Model with a language modeling head. Can be used for summarization.\n This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is\n used in combination with the [`EncoderDecoderModel`] framework.\n ')
class TrOCRDecoderWrapper(TrOCRPreTrainedModel):
def __init__(self, config):
pass
def forward(self, *args, **kwargs):
pass
| 4
| 0
| 3
| 0
| 3
| 0
| 1
| 0.67
| 1
| 2
| 1
| 0
| 2
| 1
| 2
| 3
| 12
| 2
| 6
| 4
| 3
| 4
| 6
| 4
| 3
| 1
| 2
| 0
| 2
|
5,667
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRForCausalLM
|
from ...generation import GenerationMixin
import torch
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
from torch import nn
@auto_docstring(custom_intro='\n The TrOCR Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and\n ')
class TrOCRForCausalLM(TrOCRPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['output_projection.weight']
def __init__(self, config):
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = TrOCRDecoderWrapper(config)
self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.output_projection
def set_output_embeddings(self, new_embeddings):
self.output_projection = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import (
... TrOCRConfig,
... TrOCRProcessor,
... TrOCRForCausalLM,
... ViTConfig,
... ViTModel,
... VisionEncoderDecoderModel,
... )
>>> import requests
>>> from PIL import Image
>>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel
>>> # init vision2text model with random weights
>>> encoder = ViTModel(ViTConfig())
>>> decoder = TrOCRForCausalLM(TrOCRConfig())
>>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
>>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel`
>>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
>>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
>>> # load image from the IAM dataset
>>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> pixel_values = processor(image, return_tensors="pt").pixel_values
>>> text = "industry, ' Mr. Brown commented icily. ' Let us have a"
>>> # training
>>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id
>>> model.config.pad_token_id = processor.tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> labels = processor.tokenizer(text, return_tensors="pt").input_ids
>>> outputs = model(pixel_values, labels=labels)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
5.30
>>> # inference
>>> generated_ids = model.generate(pixel_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> generated_text
'industry, " Mr. Brown commented icily. " Let us have a'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
logits = self.output_projection(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n The TrOCR Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and\n ')
class TrOCRForCausalLM(TrOCRPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import (
... TrOCRConfig,
... TrOCRProcessor,
... TrOCRForCausalLM,
... ViTConfig,
... ViTModel,
... VisionEncoderDecoderModel,
... )
>>> import requests
>>> from PIL import Image
>>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel
>>> # init vision2text model with random weights
>>> encoder = ViTModel(ViTConfig())
>>> decoder = TrOCRForCausalLM(TrOCRConfig())
>>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
>>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel`
>>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
>>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
>>> # load image from the IAM dataset
>>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> pixel_values = processor(image, return_tensors="pt").pixel_values
>>> text = "industry, ' Mr. Brown commented icily. ' Let us have a"
>>> # training
>>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id
>>> model.config.pad_token_id = processor.tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> labels = processor.tokenizer(text, return_tensors="pt").input_ids
>>> outputs = model(pixel_values, labels=labels)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
5.30
>>> # inference
>>> generated_ids = model.generate(pixel_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> generated_text
'industry, " Mr. Brown commented icily. " Let us have a'
```'''
pass
| 11
| 1
| 23
| 3
| 9
| 11
| 2
| 1.18
| 2
| 6
| 2
| 0
| 8
| 2
| 9
| 10
| 216
| 37
| 82
| 37
| 55
| 97
| 41
| 20
| 31
| 7
| 2
| 1
| 16
|
5,668
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRLearnedPositionalEmbedding
|
import torch
from typing import Optional, Union
from torch import nn
class TrOCRLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None):
"""`input_ids' shape is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids.shape[:2]
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device).expand(bsz, -1)
else:
position_ids = position_ids.unsqueeze(0)
return super().forward(position_ids + self.offset)
|
class TrOCRLearnedPositionalEmbedding(nn.Embedding):
'''
This module learns positional embeddings up to a fixed maximum size.
'''
def __init__(self, num_embeddings: int, embedding_dim: int):
pass
def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None):
'''`input_ids' shape is expected to be [bsz x seqlen].'''
pass
| 3
| 2
| 7
| 1
| 5
| 2
| 1
| 0.6
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 2
| 20
| 4
| 10
| 6
| 7
| 6
| 8
| 6
| 5
| 1
| 1
| 0
| 2
|
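For incremental decoding, the forward above only builds position ids for the new tokens, starting at `past_key_values_length`, and shifts them by the fixed `offset` of 2 before the lookup (the same offset convention used by BART-style learned position tables). A small sketch of that index arithmetic:
```python
import torch

bsz, seq_len, past_key_values_length, offset = 1, 2, 3, 2
position_ids = torch.arange(
    past_key_values_length, past_key_values_length + seq_len, dtype=torch.long
).expand(bsz, -1)
print(position_ids)           # tensor([[3, 4]]) -> positions of the two new tokens
print(position_ids + offset)  # tensor([[5, 6]]) -> rows actually read from the embedding table
```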
5,669
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRPreTrainedModel
|
from ...modeling_utils import PreTrainedModel
from .configuration_trocr import TrOCRConfig
from torch import nn
from ...utils import auto_docstring, logging
@auto_docstring
class TrOCRPreTrainedModel(PreTrainedModel):
config: TrOCRConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['TrOCRDecoderLayer']
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, (nn.Linear, nn.Conv1d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
@auto_docstring
class TrOCRPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 0
| 0
| 3
| 1
| 0
| 1
| 1
| 16
| 1
| 15
| 7
| 13
| 0
| 14
| 7
| 12
| 5
| 1
| 2
| 5
|
5,670
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRScaledWordEmbedding
|
import torch
from torch import nn
from typing import Optional, Union
class TrOCRScaledWordEmbedding(nn.Embedding):
"""
This module overrides nn.Embedding's forward by multiplying the output with the embedding scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
|
class TrOCRScaledWordEmbedding(nn.Embedding):
'''
This module overrides nn.Embedding's forward by multiplying the output with the embedding scale.
'''
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
pass
def forward(self, input_ids: torch.Tensor):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.5
| 1
| 4
| 0
| 0
| 2
| 1
| 2
| 2
| 11
| 2
| 6
| 4
| 3
| 3
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
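What the override above amounts to: a plain embedding lookup whose output is multiplied by `embed_scale`, which `TrOCRDecoder.__init__` sets to `sqrt(hidden_size)` when `config.scale_embedding` is enabled and 1.0 otherwise. A short sketch with illustrative sizes:
```python
import math

import torch
from torch import nn

vocab_size, hidden_size, padding_idx = 100, 16, 1  # illustrative values
embed_scale = math.sqrt(hidden_size)               # 1.0 when scaling is disabled
embed_tokens = nn.Embedding(vocab_size, hidden_size, padding_idx=padding_idx)

input_ids = torch.tensor([[5, 7, 1]])
scaled = embed_tokens(input_ids) * embed_scale  # what TrOCRScaledWordEmbedding.forward returns
```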
5,671
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/modeling_trocr.py
|
transformers.models.trocr.modeling_trocr.TrOCRSinusoidalPositionalEmbedding
|
import torch
from torch import nn
from typing import Optional, Union
import math
class TrOCRSinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = self.get_embedding(num_positions, embedding_dim, padding_idx)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
"""
Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0):
bsz, seq_len = input_ids.size()
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
self.weights = self.get_embedding(max_pos, self.embedding_dim, self.padding_idx)
self.weights = self.weights.to(self._float_tensor)
x = self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
return x
def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
|
class TrOCRSinusoidalPositionalEmbedding(nn.Module):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
pass
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
'''
Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
description in Section 3.5 of "Attention Is All You Need".
'''
pass
@torch.no_grad()
def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0):
pass
def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
'''
pass
| 7
| 3
| 13
| 1
| 9
| 3
| 2
| 0.37
| 1
| 3
| 0
| 0
| 3
| 4
| 4
| 14
| 60
| 8
| 38
| 21
| 29
| 14
| 32
| 17
| 27
| 3
| 1
| 1
| 7
|
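In equation form, the table that `get_embedding` above builds is the following (written to match the code, which concatenates the sine half and the cosine half rather than interleaving them; the odd-dimension zero column and the zeroed `padding_idx` row are handled at the end of the function):
```latex
% h = embedding_dim // 2 ("half_dim" in the code), p is the position index.
\omega_i = \exp\!\Bigl(-\,i \cdot \tfrac{\ln 10000}{h - 1}\Bigr), \quad i = 0, \dots, h - 1,
\qquad
PE(p,\, i) = \sin(p\,\omega_i), \qquad PE(p,\, h + i) = \cos(p\,\omega_i).
```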
5,672
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/processing_trocr.py
|
transformers.models.trocr.processing_trocr.TrOCRProcessor
|
from contextlib import contextmanager
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
import warnings
from ...image_processing_utils import BatchFeature
from typing import Optional, Union
from ...image_utils import ImageInput
from ...tokenization_utils_base import PreTokenizedInput, TextInput
class TrOCRProcessor(ProcessorMixin):
"""
Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.
[`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
[`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for
more information.
Args:
image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self._in_target_context_manager = False
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[TrOCRProcessorKwargs]) -> BatchFeature:
"""
When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
[`~AutoImageProcessor.__call__`] and returns its output. If used in the context
[`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's
[`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
"""
if self._in_target_context_manager:
return self.current_processor(images, **kwargs)
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
output_kwargs = self._merge_kwargs(TrOCRProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
if images is not None:
inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
if text is not None:
encodings = self.tokenizer(text, **output_kwargs['text_kwargs'])
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs['labels'] = encodings['input_ids']
return inputs
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return image_processor_input_names + ['labels']
@contextmanager
def as_target_processor(self):
"""
Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.
"""
warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your images inputs, or in a separate call.')
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.image_processor
self._in_target_context_manager = False
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
|
class TrOCRProcessor(ProcessorMixin):
'''
Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.
[`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
[`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for
more information.
Args:
image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[TrOCRProcessorKwargs]) -> BatchFeature:
'''
When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
[`~AutoImageProcessor.__call__`] and returns its output. If used in the context
[`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's
[`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
'''
pass
@property
def model_input_names(self):
pass
@contextmanager
def as_target_processor(self):
'''
Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.
'''
pass
@property
def feature_extractor_class(self):
pass
@property
def feature_extractor(self):
pass
| 11
| 3
| 14
| 1
| 10
| 3
| 2
| 0.37
| 1
| 5
| 2
| 0
| 7
| 2
| 7
| 24
| 124
| 16
| 79
| 27
| 61
| 29
| 49
| 17
| 41
| 7
| 2
| 1
| 17
|
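A typical call into the `__call__` implemented above: images are routed to the image processor, text to the tokenizer, and the token ids come back under the `labels` key. A short usage sketch (the checkpoint is the same one used in the TrOCRForCausalLM example earlier; the blank image is just a placeholder):
```python
from PIL import Image
from transformers import TrOCRProcessor

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
image = Image.new("RGB", (384, 384))  # placeholder input image

batch = processor(images=image, text="a line of handwritten text", return_tensors="pt")
print(batch["pixel_values"].shape)  # encoder inputs produced by the image processor
print(batch["labels"])              # tokenizer input_ids, reusable as decoder labels
```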
5,673
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/trocr/processing_trocr.py
|
transformers.models.trocr.processing_trocr.TrOCRProcessorKwargs
|
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
class TrOCRProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {}
|
class TrOCRProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
5,674
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/configuration_tvp.py
|
transformers.models.tvp.configuration_tvp.TvpConfig
|
from ..auto import CONFIG_MAPPING
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import verify_backbone_config_arguments
import copy
class TvpConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate a Tvp
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Tvp
[Intel/tvp-base](https://huggingface.co/Intel/tvp-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`PretrainedConfig` or `dict`, *optional*):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
distance_loss_weight (`float`, *optional*, defaults to 1.0):
The weight of distance loss.
duration_loss_weight (`float`, *optional*, defaults to 0.1):
The weight of duration loss.
visual_prompter_type (`str`, *optional*, defaults to `"framepad"`):
Visual prompt type. The type of padding. Framepad means padding on each frame. Should be one of "framepad"
or "framedownpad"
visual_prompter_apply (`str`, *optional*, defaults to `"replace"`):
The way of applying visual prompt. Replace means use the value of prompt to change the original value in
visual inputs. Should be one of "replace", or "add", or "remove".
visual_prompt_size (`int`, *optional*, defaults to 96):
The size of visual prompt.
max_img_size (`int`, *optional*, defaults to 448):
The maximum size of frame.
num_frames (`int`, *optional*, defaults to 48):
The number of frames extracted from a video.
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Tvp text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`TvpModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
max_grid_col_position_embeddings (`int`, *optional*, defaults to 100):
The largest number of horizontal patches from a video frame.
max_grid_row_position_embeddings (`int`, *optional*, defaults to 100):
The largest number of vertical patches from a video frame.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability of hidden layers.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability of attention layers.
"""
model_type = 'tvp'
def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, distance_loss_weight=1.0, duration_loss_weight=0.1, visual_prompter_type='framepad', visual_prompter_apply='replace', visual_prompt_size=96, max_img_size=448, num_frames=48, vocab_size=30522, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, max_position_embeddings=512, max_grid_col_position_embeddings=100, max_grid_row_position_embeddings=100, hidden_dropout_prob=0.1, hidden_act='gelu', layer_norm_eps=1e-12, initializer_range=0.02, attention_probs_dropout_prob=0.1, **kwargs):
super().__init__(**kwargs)
if backbone_config is None and backbone is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.distance_loss_weight = distance_loss_weight
self.duration_loss_weight = duration_loss_weight
self.visual_prompter_type = visual_prompter_type
self.visual_prompter_apply = visual_prompter_apply
self.visual_prompt_size = visual_prompt_size
self.max_img_size = max_img_size
self.num_frames = num_frames
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.max_grid_col_position_embeddings = max_grid_col_position_embeddings
self.max_grid_row_position_embeddings = max_grid_row_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_dropout_prob = hidden_dropout_prob
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.attention_probs_dropout_prob = attention_probs_dropout_prob
@property
def sub_configs(self):
return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
@classmethod
def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
"""Instantiate a [`TvpConfig`] (or a derived class) from a pre-trained backbone model configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
Returns:
[`TvpConfig`]: An instance of a configuration object
"""
return cls(backbone_config=backbone_config, **kwargs)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
if output['backbone_config'] is not None:
output['backbone_config'] = self.backbone_config.to_dict()
output['model_type'] = self.__class__.model_type
return output
|
class TvpConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate a Tvp
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Tvp
[Intel/tvp-base](https://huggingface.co/Intel/tvp-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`PretrainedConfig` or `dict`, *optional*):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
distance_loss_weight (`float`, *optional*, defaults to 1.0):
The weight of distance loss.
duration_loss_weight (`float`, *optional*, defaults to 0.1):
The weight of duration loss.
visual_prompter_type (`str`, *optional*, defaults to `"framepad"`):
Visual prompt type, i.e. the type of padding applied to the frames. "framepad" pads each frame on all
sides, while "framedownpad" pads only at the bottom of each frame. Should be one of "framepad" or "framedownpad".
visual_prompter_apply (`str`, *optional*, defaults to `"replace"`):
The way of applying the visual prompt. "replace" uses the prompt values to replace the original values in
the visual inputs. Should be one of "replace", "add", or "remove".
visual_prompt_size (`int`, *optional*, defaults to 96):
The size of visual prompt.
max_img_size (`int`, *optional*, defaults to 448):
The maximum size of frame.
num_frames (`int`, *optional*, defaults to 48):
The number of frames extracted from a video.
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Tvp text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`TvpModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
max_grid_col_position_embeddings (`int`, *optional*, defaults to 100):
The largest number of horizontal patches from a video frame.
max_grid_row_position_embeddings (`int`, *optional*, defaults to 100):
The largest number of vertical patches from a video frame.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability of hidden layers.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability of attention layers.
'''
def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, distance_loss_weight=1.0, duration_loss_weight=0.1, visual_prompter_type='framepad', visual_prompter_apply='replace', visual_prompt_size=96, max_img_size=448, num_frames=48, vocab_size=30522, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, max_position_embeddings=512, max_grid_col_position_embeddings=100, max_grid_row_position_embeddings=100, hidden_dropout_prob=0.1, hidden_act='gelu', layer_norm_eps=1e-12, initializer_range=0.02, attention_probs_dropout_prob=0.1, **kwargs):
pass
@property
def sub_configs(self):
pass
@classmethod
def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
'''Instantiate a [`TvpConfig`] (or a derived class) from a pre-trained backbone model configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
Returns:
[`TvpConfig`]: An instance of a configuration object
'''
pass
def to_dict(self):
'''
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
'''
pass
| 7 | 3 | 31 | 1 | 26 | 4 | 2 | 1 | 1 | 2 | 0 | 0 | 2 | 25 | 3 | 3 | 171 | 11 | 80 | 62 | 47 | 80 | 44 | 33 | 40 | 3 | 1 | 1 | 6 |
|
5,675
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/image_processing_tvp.py
|
transformers.models.tvp.image_processing_tvp.TvpImageProcessor
|
from ...image_transforms import PaddingMode, flip_channel_order, pad, resize, to_channel_dimension_format
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_valid_image, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from collections.abc import Iterable
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from typing import Optional, Union
class TvpImageProcessor(BaseImageProcessor):
"""
Constructs a Tvp image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 448}`):
Size of the output image after resizing. The longest edge of the image will be resized to
`size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
`size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
pad_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
`preprocess` method.
constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
The fill value to use when padding the image.
pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`):
Use what kind of mode in padding.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_pad: bool=True, pad_size: Optional[dict[str, int]]=None, constant_values: Union[float, Iterable[float]]=0, pad_mode: PaddingMode=PaddingMode.CONSTANT, do_normalize: bool=True, do_flip_channel_order: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'longest_edge': 448}
crop_size = crop_size if crop_size is not None else {'height': 448, 'width': 448}
pad_size = pad_size if pad_size is not None else {'height': 448, 'width': 448}
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.pad_size = pad_size
self.constant_values = constant_values
self.pad_mode = pad_mode
self.do_normalize = do_normalize
self.do_flip_channel_order = do_flip_channel_order
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its
longest edge of length `s` while keeping the aspect ratio of the original image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=False)
if 'height' in size and 'width' in size:
output_size = (size['height'], size['width'])
elif 'longest_edge' in size:
output_size = get_resize_output_image_size(image, size['longest_edge'], input_data_format)
else:
raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. Got {size.keys()}")
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def pad_image(self, image: np.ndarray, pad_size: Optional[dict[str, int]]=None, constant_values: Union[float, Iterable[float]]=0, pad_mode: PaddingMode=PaddingMode.CONSTANT, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
"""
Pad an image with zeros to the given size.
Args:
image (`np.ndarray`):
Image to pad.
pad_size (`dict[str, int]`):
Size of the output image with padding applied.
constant_values (`Union[float, Iterable[float]]`):
The fill value to use when padding the image.
pad_mode (`PaddingMode`):
The padding mode, defaults to `PaddingMode.CONSTANT`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
max_height = pad_size.get('height', height)
max_width = pad_size.get('width', width)
pad_right, pad_bottom = (max_width - width, max_height - height)
if pad_right < 0 or pad_bottom < 0:
raise ValueError('The padding size must be greater than image size')
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=pad_mode, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_pad: bool=True, pad_size: Optional[dict[str, int]]=None, constant_values: Optional[Union[float, Iterable[float]]]=None, pad_mode: PaddingMode=None, do_normalize: Optional[bool]=None, do_flip_channel_order: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""Preprocesses a single image."""
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
image = to_numpy_array(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format)
if do_pad:
image = self.pad_image(image=image, pad_size=pad_size, constant_values=constant_values, pad_mode=pad_mode, input_data_format=input_data_format)
if do_flip_channel_order:
image = flip_channel_order(image=image, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
@filter_out_non_signature_kwargs()
def preprocess(self, videos: Union[ImageInput, list[ImageInput], list[list[ImageInput]]], do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_pad: Optional[bool]=None, pad_size: Optional[dict[str, int]]=None, constant_values: Optional[Union[float, Iterable[float]]]=None, pad_mode: PaddingMode=None, do_normalize: Optional[bool]=None, do_flip_channel_order: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
videos (`ImageInput` or `list[ImageInput]` or `list[list[ImageInput]]`):
Frames to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after applying resize.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after applying the center crop.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image.
pad_size (`dict[str, int]`, *optional*, defaults to `self.pad_size`):
Size of the image after applying the padding.
constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to `self.constant_values`):
The fill value to use when padding the image.
pad_mode (`PaddingMode`, *optional*, defaults to `self.pad_mode`):
The padding mode to use.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the inferred channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
pad_size = pad_size if pad_size is not None else self.pad_size
constant_values = constant_values if constant_values is not None else self.constant_values
pad_mode = pad_mode if pad_mode else self.pad_mode
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_flip_channel_order = do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
if not valid_images(videos):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
videos = make_batched(videos)
videos = [np.array([self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_pad=do_pad, pad_size=pad_size, constant_values=constant_values, pad_mode=pad_mode, do_normalize=do_normalize, do_flip_channel_order=do_flip_channel_order, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in video]) for video in videos]
data = {'pixel_values': videos}
return BatchFeature(data=data, tensor_type=return_tensors)
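A hedged usage sketch of the processor above, assuming `TvpImageProcessor` is importable from transformers; the frame sizes and frame count are arbitrary.

import numpy as np
from transformers import TvpImageProcessor

processor = TvpImageProcessor()  # defaults: resize longest edge to 448, crop and pad to 448x448

# Two RGB frames in (height, width, channels) layout with values in [0, 255].
frames = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(2)]

# A single video is a list of frames; a batch is a list of such lists.
batch = processor.preprocess(videos=[frames], return_tensors="pt")
print(batch["pixel_values"].shape)  # (1, 2, 3, 448, 448) after resize, crop, rescale, normalize and pad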
|
class TvpImageProcessor(BaseImageProcessor):
'''
Constructs a Tvp image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 448}`):
Size of the output image after resizing. The longest edge of the image will be resized to
`size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
`size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
pad_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
`preprocess` method.
constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
The fill value to use when padding the image.
pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`):
Use what kind of mode in padding.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_pad: bool=True, pad_size: Optional[dict[str, int]]=None, constant_values: Union[float, Iterable[float]]=0, pad_mode: PaddingMode=PaddingMode.CONSTANT, do_normalize: bool=True, do_flip_channel_order: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its
longest edge of length `s` while keeping the aspect ratio of the original image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def pad_image(self, image: np.ndarray, pad_size: Optional[dict[str, int]]=None, constant_values: Union[float, Iterable[float]]=0, pad_mode: PaddingMode=PaddingMode.CONSTANT, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
'''
Pad an image with zeros to the given size.
Args:
image (`np.ndarray`):
Image to pad.
pad_size (`dict[str, int]`):
Size of the output image with padding applied.
constant_values (`Union[float, Iterable[float]]`):
The fill value to use when padding the image.
pad_mode (`PaddingMode`):
The padding mode, defaults to `PaddingMode.CONSTANT`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_pad: bool=True, pad_size: Optional[dict[str, int]]=None, constant_values: Optional[Union[float, Iterable[float]]]=None, pad_mode: PaddingMode=None, do_normalize: Optional[bool]=None, do_flip_channel_order: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''Preprocesses a single image.'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, videos: Union[ImageInput, list[ImageInput], list[list[ImageInput]]], do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_pad: Optional[bool]=None, pad_size: Optional[dict[str, int]]=None, constant_values: Optional[Union[float, Iterable[float]]]=None, pad_mode: PaddingMode=None, do_normalize: Optional[bool]=None, do_flip_channel_order: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
videos (`ImageInput` or `list[ImageInput]` or `list[list[ImageInput]]`):
Frames to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after applying resize.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after applying the center crop.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image.
pad_size (`dict[str, int]`, *optional*, defaults to `self.pad_size`):
Size of the image after applying the padding.
constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to `self.constant_values`):
The fill value to use when padding the image.
pad_mode (`PaddingMode`, *optional*, defaults to `self.pad_mode`):
The padding mode to use.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the inferred channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 7 | 5 | 68 | 5 | 45 | 18 | 7 | 0.61 | 1 | 9 | 3 | 0 | 5 | 15 | 5 | 25 | 395 | 30 | 227 | 108 | 143 | 139 | 80 | 30 | 74 | 17 | 3 | 1 | 35 |
|
5,676
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpAttention
|
from ...pytorch_utils import prune_linear_layer
import math
from torch import nn
import torch
from typing import Optional
class TvpAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attn_dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.num_attention_heads, self.attention_head_size)
heads = set(heads) - self.pruned_heads
for head in heads:
head = head - sum((1 if h < head else 0 for h in self.pruned_heads))
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int):
return tensor.view(batch_size, sequence_length, self.num_attention_heads, self.attention_head_size).transpose(1, 2).contiguous()
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool]=None):
batch_size, sequence_length = hidden_states.shape[:2]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self._reshape(mixed_query_layer, sequence_length, batch_size)
key_layer = self._reshape(mixed_key_layer, sequence_length, batch_size)
value_layer = self._reshape(mixed_value_layer, sequence_length, batch_size)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
attn_output = torch.matmul(attention_probs, value_layer)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(batch_size, sequence_length, self.all_head_size)
attn_output = self.dense(attn_output)
attn_output = self.dropout(attn_output)
attn_output = self.layer_norm(attn_output + hidden_states)
outputs = (attn_output, attention_probs) if output_attentions else (attn_output,)
return outputs
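A small shape-check sketch for the attention module above, assuming the internal module path listed for this record is importable; the config sizes are arbitrary.

import torch
from transformers import TvpConfig
from transformers.models.tvp.modeling_tvp import TvpAttention

config = TvpConfig(hidden_size=64, num_attention_heads=4)
attention = TvpAttention(config)

hidden_states = torch.rand(2, 7, 64)  # (batch, sequence_length, hidden_size)
attn_output, attn_probs = attention(hidden_states, output_attentions=True)
print(attn_output.shape, attn_probs.shape)  # (2, 7, 64) and (2, 4, 7, 7)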
|
class TvpAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool]=None):
pass
| 5 | 0 | 23 | 3 | 18 | 3 | 3 | 0.14 | 1 | 6 | 0 | 0 | 4 | 11 | 4 | 14 | 96 | 16 | 71 | 36 | 60 | 10 | 59 | 30 | 54 | 4 | 1 | 1 | 11 |
|
5,677
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpEncodeLayer
|
from typing import Optional
from ...modeling_layers import GradientCheckpointingLayer
class TvpEncodeLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = TvpAttention(config)
self.intermediate = TvpIntermediate(config)
self.output = TvpOutputLayer(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool]=None):
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
|
class TvpEncodeLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool]=None):
pass
| 3 | 0 | 12 | 0 | 12 | 1 | 1 | 0.04 | 1 | 5 | 3 | 0 | 2 | 3 | 2 | 12 | 26 | 1 | 25 | 17 | 16 | 1 | 14 | 11 | 11 | 1 | 1 | 0 | 2 |
|
5,678
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpEncoder
|
import torch
from typing import Optional
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput
from torch import nn
class TvpEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([TvpEncodeLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states, attention_mask=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (all_hidden_states,)
if output_attentions:
outputs = outputs + (all_attentions,)
return outputs
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states if output_hidden_states else None, attentions=all_attentions if output_attentions else None)
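A hedged sketch for the encoder above showing that `head_mask` is indexed per layer, so a list of `None`s stands in for "no heads masked"; the config sizes are arbitrary.

import torch
from transformers import TvpConfig
from transformers.models.tvp.modeling_tvp import TvpEncoder

config = TvpConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128, num_hidden_layers=2)
encoder = TvpEncoder(config)

hidden_states = torch.rand(1, 5, 64)
head_mask = [None] * config.num_hidden_layers  # forward indexes head_mask[i] for every layer
outputs = encoder(hidden_states, head_mask=head_mask, output_hidden_states=True, return_dict=True)
print(outputs.last_hidden_state.shape, len(outputs.hidden_states))  # (1, 5, 64) and 3 hidden states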
|
class TvpEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
pass
| 3 | 0 | 29 | 3 | 26 | 1 | 8 | 0.04 | 1 | 6 | 2 | 0 | 2 | 3 | 2 | 12 | 60 | 7 | 52 | 19 | 41 | 2 | 31 | 11 | 28 | 15 | 1 | 2 | 16 |
|
5,679
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpForVideoGrounding
|
import torch
from typing import Optional
from ...utils import auto_docstring, logging
@auto_docstring(custom_intro='\n Tvp Model with a video grounding head on top computing IoU, distance, and duration loss.\n ')
class TvpForVideoGrounding(TvpPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.model = TvpModel(config)
self.video_grounding_head = TvpVideoGroundingHead(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, labels: Optional[tuple[torch.Tensor]]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False):
"""
labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*):
The labels contain the duration, start time, and end time of the video corresponding to the text.
Examples:
```python
>>> import torch
>>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding
>>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
>>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
>>> pixel_values = torch.rand(1, 1, 3, 448, 448)
>>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
>>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
outputs = self.model(input_ids, pixel_values, attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)
pooler_output = outputs[1]
logits = self.video_grounding_head(pooler_output)
loss = None
if labels is not None:
criterion = TvpLoss(['iou', 'distance', 'duration'])
criterion.to(self.device)
loss_dict = criterion(logits, labels)
loss = loss_dict['iou'] + self.config.distance_loss_weight * loss_dict['distance'] + self.config.duration_loss_weight * loss_dict['duration']
if not return_dict:
outputs = (logits,) + outputs[2:]
if loss is not None:
outputs = (loss,) + outputs
return outputs
return TvpVideoGroundingOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
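A hedged extension of the docstring example above: when `labels` are provided they are forwarded to `TvpLoss`, which unpacks them as `(duration, start, end)`; the label values below are arbitrary.

import torch
from transformers import AutoTokenizer, TvpForVideoGrounding

model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")

pixel_values = torch.rand(1, 1, 3, 448, 448)
text_inputs = tokenizer("This is an example input", return_tensors="pt")

# labels: video duration, ground-truth start time, ground-truth end time (in seconds).
labels = (torch.tensor([10.0]), torch.tensor([2.0]), torch.tensor([7.0]))
outputs = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask, labels=labels, return_dict=True)
print(outputs.loss, outputs.logits.shape)  # weighted sum of the iou/distance/duration terms; logits are (1, 2)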
|
@auto_docstring(custom_intro='\n Tvp Model with a video grounding head on top computing IoU, distance, and duration loss.\n ')
class TvpForVideoGrounding(TvpPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, labels: Optional[tuple[torch.Tensor]]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False):
'''
labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*):
The labels contain the duration, start time, and end time of the video corresponding to the text.
Examples:
```python
>>> import torch
>>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding
>>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
>>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
>>> pixel_values = torch.rand(1, 1, 3, 448, 448)
>>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
>>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
```'''
pass
| 5 | 1 | 37 | 4 | 26 | 7 | 3 | 0.25 | 1 | 7 | 4 | 0 | 2 | 3 | 2 | 3 | 77 | 8 | 55 | 24 | 39 | 14 | 24 | 12 | 21 | 5 | 2 | 2 | 6 |
|
5,680
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpFrameDownPadPrompter
|
import torch
from torch import nn
class TvpFrameDownPadPrompter(nn.Module):
"""
Pad frames extracted from videos only at the bottom.
"""
def __init__(self, config):
if config.visual_prompter_apply not in ('add', 'replace', 'remove'):
raise ValueError('`visual_prompter_apply` must be in (add, replace, remove)')
super().__init__()
self.visual_prompt_size = config.visual_prompt_size
self.frame_num = config.num_frames
self.max_img_size = config.max_img_size
self.visual_prompter_apply = config.visual_prompter_apply
self.pad_down = nn.Parameter(torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]))
def forward(self, pixel_values):
if self.visual_prompter_apply != 'add':
visual_prompt_mask = torch.ones([self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device)
visual_prompt_mask[self.max_img_size - self.visual_prompt_size:self.max_img_size, :] = 0.0
pixel_values *= visual_prompt_mask
if self.visual_prompter_apply != 'remove':
prompt = torch.zeros([pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size], device=pixel_values.device)
start_point = self.max_img_size - self.visual_prompt_size
prompt[:, :, :, start_point:self.max_img_size, :] = self.pad_down
pixel_values += prompt.to(pixel_values.dtype)
return pixel_values
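A small sketch for the bottom-pad prompter above, assuming the internal module path listed for this record is importable and the `config.num_frames` access used in `__init__` (the config defines `num_frames`); `num_frames` is kept small so the learnable pad stays tiny.

import torch
from transformers import TvpConfig
from transformers.models.tvp.modeling_tvp import TvpFrameDownPadPrompter

config = TvpConfig(num_frames=2, max_img_size=448, visual_prompt_size=96)
prompter = TvpFrameDownPadPrompter(config)

pixel_values = torch.rand(1, 2, 3, 448, 448)  # (batch, frames, channels, height, width)
out = prompter(pixel_values)
print(out.shape)  # (1, 2, 3, 448, 448); the bottom 96 rows of every frame now carry the learned pad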
|
class TvpFrameDownPadPrompter(nn.Module):
'''
Pad frames extracted from videos only at the bottom.
'''
def __init__(self, config):
pass
def forward(self, pixel_values):
pass
| 3 | 1 | 15 | 1 | 14 | 0 | 3 | 0.11 | 1 | 2 | 0 | 0 | 2 | 5 | 2 | 12 | 35 | 4 | 28 | 11 | 25 | 3 | 21 | 11 | 18 | 3 | 1 | 1 | 5 |
|
5,681
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpFramePadPrompter
|
from torch import nn
import torch
class TvpFramePadPrompter(nn.Module):
"""
Pad frames extracted from videos in the surroundings.
"""
def __init__(self, config):
if config.visual_prompter_apply not in ('add', 'replace', 'remove'):
raise ValueError('`visual_prompter_apply` must be in (add, replace, remove)')
super().__init__()
self.num_frames = config.num_frames
self.max_img_size = config.max_img_size
self.visual_prompter_apply = config.visual_prompter_apply
self.base_size = config.max_img_size - config.visual_prompt_size * 2
self.pad_up = nn.Parameter(torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]))
self.pad_down = nn.Parameter(torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]))
self.pad_left = nn.Parameter(torch.randn([1, config.num_frames, 3, config.max_img_size - config.visual_prompt_size * 2, config.visual_prompt_size]))
self.pad_right = nn.Parameter(torch.randn([1, config.num_frames, 3, config.max_img_size - config.visual_prompt_size * 2, config.visual_prompt_size]))
def interpolate_pad_encoding(self, prompt: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained pad weights, so that the model can be used on collections of higher
resolution images (high-resolution videos).
"""
h0, w0 = (height / self.max_img_size, width / self.max_img_size)
batch, num_frames, channels, prompt_height, prompt_width = prompt.shape
prompt = prompt.reshape(batch * num_frames, channels, prompt_height, prompt_width)
prompt = nn.functional.interpolate(prompt, scale_factor=(h0, w0), mode='bicubic', align_corners=False)
prompt = prompt.reshape(batch, num_frames, channels, height, width)
return prompt
def forward(self, pixel_values, interpolate_pad_encoding: bool=False):
height, width = (pixel_values.shape[-2], pixel_values.shape[-1]) if interpolate_pad_encoding else (self.max_img_size, self.max_img_size)
if self.visual_prompter_apply not in ('add', 'remove', 'replace'):
raise ValueError(f'Invalid visual_prompter_apply value {self.visual_prompter_apply}')
if self.visual_prompter_apply in ('replace', 'remove'):
visual_prompt_mask = torch.ones([height, width], dtype=pixel_values.dtype, device=pixel_values.device)
pixel_values *= visual_prompt_mask
if self.visual_prompter_apply in ('replace', 'add'):
base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device)
prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4)
prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3)
prompt = torch.cat(pixel_values.size(0) * [prompt])
if interpolate_pad_encoding:
prompt = self.interpolate_pad_encoding(prompt, height, width)
pixel_values = pixel_values + prompt.to(pixel_values.dtype)
return pixel_values
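A hedged sketch for the surround-pad prompter above, assuming the internal module path listed for this record is importable; the shapes are arbitrary.

import torch
from transformers import TvpConfig
from transformers.models.tvp.modeling_tvp import TvpFramePadPrompter

config = TvpConfig(num_frames=2, max_img_size=448, visual_prompt_size=96)
prompter = TvpFramePadPrompter(config)

pixel_values = torch.rand(2, 2, 3, 448, 448)
print(prompter(pixel_values).shape)  # (2, 2, 3, 448, 448); the 96-pixel border comes from the learned pads

# With interpolate_pad_encoding=True the learned pads are bicubically rescaled to the input resolution.
hires = torch.rand(1, 2, 3, 896, 896)
print(prompter(hires, interpolate_pad_encoding=True).shape)  # (1, 2, 3, 896, 896)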
|
class TvpFramePadPrompter(nn.Module):
'''
Pad frames extracted from videos in the surroundings.
'''
def __init__(self, config):
pass
def interpolate_pad_encoding(self, prompt: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained pad weights, so that the model can be used on collections of higher
resolution images (high-resolution videos).
'''
pass
def forward(self, pixel_values, interpolate_pad_encoding: bool=False):
pass
| 4 | 2 | 27 | 2 | 23 | 2 | 3 | 0.14 | 1 | 5 | 0 | 0 | 3 | 8 | 3 | 13 | 88 | 9 | 69 | 18 | 65 | 10 | 36 | 18 | 32 | 6 | 1 | 2 | 9 |
|
5,682
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpIntermediate
|
from torch import nn
from ...activations import ACT2FN
import torch
class TvpIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class TvpIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
|
5,683
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpLoss
|
import torch
from torch import nn
class TvpLoss(nn.Module):
"""
This class computes the losses for `TvpForVideoGrounding`: the model logits are scaled by the video duration
to obtain a candidate (start, end) span, which is then compared with the ground-truth start time, end time,
and duration to produce the requested IoU, distance, and duration loss terms.
Args:
losses (`list[str]`):
List of all the losses to be applied.
"""
def __init__(self, losses):
super().__init__()
self.loss_map = {'iou': self.loss_iou, 'distance': self.loss_distance, 'duration': self.loss_duration}
for loss in losses:
if loss not in self.loss_map:
raise ValueError(f'Loss {loss} not supported')
self.losses = losses
def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
"""
Measure the intersection-over-union loss (1 - IoU) between the candidate and ground-truth spans.
"""
inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time)
union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time)
iou = 1 - inter.clamp(min=0) / union
return iou
def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
"""
Measure the distance of mid points.
"""
mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0)
mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0)
distance_diff = torch.div(torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration).clamp(min=0.2)
return distance_diff
def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
"""
Measure the difference of duration.
"""
duration_candidates = torch.sub(candidates_end_time, candidates_start_time)
duration_groundtruth = torch.sub(end_time, start_time)
duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration))
duration_diff = duration_diff.clamp(min=0.4)
return duration_diff
def forward(self, logits, labels):
"""
This performs the loss computation.
Args:
logits (`torch.FloatTensor`):
The output logits of head module.
labels (`list[torch.FloatTensor]`):
List of tensors ([duration, start, end]) containing the duration of the video and the start and end times of the moment corresponding to the text.
"""
duration, start_time, end_time = labels
candidates = torch.mul(logits, duration)
candidates_start_time, candidates_end_time = (candidates[:, 0].float(), candidates[:, 1].float())
losses_dict = {}
for loss in self.losses:
losses_dict.update({loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)})
return losses_dict
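A hedged numeric sketch of the three loss terms above; the logits are interpreted as predicted (start, end) fractions of the clip duration, and the values below are arbitrary.

import torch
from transformers.models.tvp.modeling_tvp import TvpLoss

criterion = TvpLoss(["iou", "distance", "duration"])

# Ground truth: a 10 second clip whose target moment spans seconds 2 to 7.
duration = torch.tensor([10.0])
start_time = torch.tensor([2.0])
end_time = torch.tensor([7.0])

# Predicted start/end as fractions of the duration -> candidate span [3s, 8s].
logits = torch.tensor([[0.3, 0.8]])

loss_dict = criterion(logits, (duration, start_time, end_time))
# iou = 1 - 4/6 ~ 0.333, distance clamps to 0.2, duration clamps to 0.4 for this example
print({name: value.item() for name, value in loss_dict.items()})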
|
class TvpLoss(nn.Module):
'''
This class computes the losses for `TvpForVideoGrounding`: the model logits are scaled by the video duration
to obtain a candidate (start, end) span, which is then compared with the ground-truth start time, end time,
and duration to produce the requested IoU, distance, and duration loss terms.
Args:
losses (`list[str]`):
List of all the losses to be applied.
'''
def __init__(self, losses):
pass
def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
'''
Measure the intersection-over-union loss (1 - IoU) between the candidate and ground-truth spans.
'''
pass
def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
'''
Measure the distance of mid points.
'''
pass
def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
'''
Measure the difference of duration.
'''
pass
def forward(self, logits, labels):
'''
This performs the loss computation.
Args:
logits (`torch.FloatTensor`):
The output logits of head module.
labels (`list[torch.FloatTensor]`):
List of tensors ([duration, start, end]) containing the duration of the video and the start and end times of the moment corresponding to the text.
'''
pass
| 6 | 5 | 13 | 1 | 8 | 3 | 2 | 0.63 | 1 | 2 | 0 | 0 | 5 | 2 | 5 | 15 | 78 | 13 | 40 | 23 | 34 | 25 | 32 | 23 | 26 | 3 | 1 | 2 | 8 |
|
5,684
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpModel
|
from ...utils import auto_docstring, logging
from typing import Optional
from torch import nn
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput
@auto_docstring(custom_intro='\n The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on top.\n ')
class TvpModel(TvpPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.vision_model = TvpVisionModel(config)
self.embeddings = TvpTextInputEmbeddings(config)
self.visual_embeddings = TvpVisualInputEmbedding(config)
self.encoder = TvpEncoder(config)
self.pooler = TvpPooler(config)
self.text_prompt = nn.Parameter(torch.randn([1, 10, config.hidden_size]))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if config.visual_prompter_type not in TVP_PROMPTER_CLASSES_MAPPING:
raise ValueError('`visual_prompter_type` must be in (framedownpad, framepad)')
self.visual_prompter = TVP_PROMPTER_CLASSES_MAPPING[config.visual_prompter_type](config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False):
"""
Examples:
```python
>>> import torch
>>> from transformers import AutoConfig, AutoTokenizer, TvpModel
>>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp")
>>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
>>> pixel_values = torch.rand(1, 1, 3, 448, 448)
>>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
>>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
pixel_values = self.vision_model(self.visual_prompter(pixel_values, interpolate_pad_encoding=interpolate_pos_encoding))
text_embedding_output = self.embeddings(input_ids=input_ids)
visual_embedding_output = self.visual_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if attention_mask is not None:
visual_attention_mask = attention_mask.new_ones(visual_embedding_output.shape[:2])
pt_mask = torch.ones(attention_mask.shape[0], 10).to(device=attention_mask.device, dtype=attention_mask.dtype)
attention_mask = torch.cat([pt_mask, attention_mask, visual_attention_mask], dim=-1)
attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size()).to(input_ids.device)
text_prompt = self.text_prompt.expand(text_embedding_output.shape[0], -1, -1)
embedding_output = torch.cat([text_prompt, text_embedding_output, visual_embedding_output], dim=1)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=self.get_head_mask(head_mask, self.config.num_hidden_layers), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
last_hidden_state = self.dropout(last_hidden_state)
pooled_output = self.dropout(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring(custom_intro='\n The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on top.\n ')
class TvpModel(TvpPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False):
'''
Examples:
```python
>>> import torch
>>> from transformers import AutoConfig, AutoTokenizer, TvpModel
>>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp")
>>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
>>> pixel_values = torch.rand(1, 1, 3, 448, 448)
>>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
>>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
```'''
pass
| 8 | 2 | 20 | 1 | 14 | 4 | 2 | 0.31 | 1 | 9 | 6 | 0 | 5 | 9 | 5 | 6 | 105 | 11 | 72 | 36 | 54 | 22 | 42 | 25 | 36 | 5 | 2 | 1 | 11
|
5,685
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpOutputLayer
|
import torch
from torch import nn
class TvpOutputLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.layer_norm(hidden_states + input_tensor)
return hidden_states
|
class TvpOutputLayer(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2
|
5,686
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpPooler
|
from torch import nn
import torch
class TvpPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class TvpPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2
|
5,687
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpPreTrainedModel
|
from ...utils import auto_docstring, logging
from torch import nn
from .configuration_tvp import TvpConfig
from ...modeling_utils import PreTrainedModel
@auto_docstring
class TvpPreTrainedModel(PreTrainedModel):
config: TvpConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, TvpModel):
nn.init.normal_(module.text_prompt)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
if hasattr(module, 'pad_up'):
nn.init.normal_(module.pad_up)
if hasattr(module, 'pad_down'):
nn.init.normal_(module.pad_down)
if hasattr(module, 'pad_left'):
nn.init.normal_(module.pad_left)
if hasattr(module, 'pad_right'):
nn.init.normal_(module.pad_right)
|
@auto_docstring
class TvpPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
| 3 | 1 | 17 | 2 | 12 | 3 | 6 | 0.38 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 26 | 4 | 16 | 5 | 14 | 6 | 15 | 5 | 13 | 6 | 1 | 2 | 6
|
5,688
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpTextInputEmbeddings
|
from torch import nn
import torch
class TvpTextInputEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class TvpTextInputEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass
| 3 | 1 | 15 | 2 | 14 | 0 | 4 | 0.04 | 1 | 1 | 0 | 0 | 2 | 5 | 2 | 12 | 34 | 5 | 28 | 14 | 25 | 1 | 27 | 14 | 24 | 6 | 1 | 1 | 7
|
5,689
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpVideoGroundingHead
|
from torch import nn
class TvpVideoGroundingHead(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_0 = nn.Linear(config.hidden_size, config.hidden_size * 2)
self.layer_1 = nn.Linear(config.hidden_size * 2, 2)
self.activation_0 = nn.ReLU()
self.activation_1 = nn.Sigmoid()
def forward(self, pooler_output):
logits = self.activation_0(self.layer_0(pooler_output))
logits = self.activation_1(self.layer_1(logits))
return logits
|
class TvpVideoGroundingHead(nn.Module):
def __init__(self, config):
pass
def forward(self, pooler_output):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 12 | 1 | 11 | 8 | 8 | 0 | 11 | 8 | 8 | 1 | 1 | 0 | 2
|
5,690
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpVideoGroundingOutput
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput
import torch
from typing import Optional
from dataclasses import dataclass
from ...utils import auto_docstring, logging
@dataclass
@auto_docstring
class TvpVideoGroundingOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Temporal-Distance IoU loss for video grounding.
logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the
input texts.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring
class TvpVideoGroundingOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Temporal-Distance IoU loss for video grounding.
logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the
input texts.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 1 | 5 | 5 | 4 | 15 | 5 | 5 | 4 | 0 | 1 | 0 | 0
|
5,691
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpVisionModel
|
from ...utils.backbone_utils import load_backbone
from torch import nn
class TvpVisionModel(nn.Module):
def __init__(self, config):
super().__init__()
self.backbone = load_backbone(config)
if config.backbone_config is not None:
in_channels = config.backbone_config.hidden_sizes[-1]
elif hasattr(self.backbone, 'config') and hasattr(self.backbone.config, 'hidden_sizes'):
in_channels = self.backbone.config.hidden_sizes[-1]
elif hasattr(self.backbone, 'config') and hasattr(self.backbone.config, 'hidden_size'):
in_channels = self.backbone.config.hidden_size
else:
raise ValueError('Backbone config not found')
self.grid_encoder_conv = nn.Conv2d(in_channels, config.hidden_size, kernel_size=3, stride=1, padding=1, groups=1, bias=False)
def forward(self, pixel_values):
batch_size, num_frames, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.view(batch_size * num_frames, num_channels, height, width)
grid_feat_outputs = self.backbone(pixel_values)['feature_maps'][0]
grid = self.grid_encoder_conv(grid_feat_outputs)
grid = nn.functional.max_pool2d(grid, kernel_size=2, stride=2)
grid = nn.functional.relu(grid, inplace=True)
new_channel, new_height, new_width = grid.shape[-3:]
grid = grid.view(batch_size, num_frames, new_channel, new_height, new_width)
grid = grid.permute(0, 1, 3, 4, 2)
return grid
|
class TvpVisionModel(nn.Module):
def __init__(self, config):
pass
def forward(self, pixel_values):
pass
| 3 | 0 | 18 | 1 | 16 | 2 | 3 | 0.09 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 38 | 3 | 32 | 10 | 29 | 3 | 21 | 10 | 18 | 4 | 1 | 1 | 5
|
5,692
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/modeling_tvp.py
|
transformers.models.tvp.modeling_tvp.TvpVisualInputEmbedding
|
import torch
from torch import nn
class TvpVisualInputEmbedding(nn.Module):
"""
Takes input of both image and video (multi-frame)
"""
def __init__(self, config):
super().__init__()
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size)
self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(1, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.max_grid_row_position_embeddings = config.max_grid_row_position_embeddings
self.max_grid_col_position_embeddings = config.max_grid_col_position_embeddings
def interpolate_pos_encoding(self, embedding: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained pad weights , to be able to use the model on collection of high
resolution images (high resolution videos).
"""
h0 = w0 = 1
if height > self.max_grid_row_position_embeddings:
h0 = height / self.max_grid_row_position_embeddings
if width > self.max_grid_col_position_embeddings:
w0 = width / self.max_grid_col_position_embeddings
embedding = embedding.permute(0, 3, 1, 2)
embedding = nn.functional.interpolate(embedding, scale_factor=(h0, w0), mode='bicubic', align_corners=False)
embedding = embedding.permute(0, 2, 3, 1)
return embedding
def add_2d_positional_embeddings(self, grid, interpolate_pos_encoding: bool=False):
"""
Args:
grid: (batch_size, height, width, hidden_dim)
interpolate_pos_encoding: (`bool`, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)
"""
batch_size, height, width, hidden_dim = grid.shape
row_height = min(self.max_grid_row_position_embeddings, height)
row_position_ids = torch.arange(row_height, dtype=torch.long, device=grid.device)
row_position_embeddings = self.row_position_embeddings(row_position_ids)
row_shape = (1,) * (len(grid.shape) - 3) + (row_height, 1, hidden_dim)
row_position_embeddings = row_position_embeddings.view(*row_shape)
row_width = min(self.max_grid_col_position_embeddings, width)
col_position_ids = torch.arange(row_width, dtype=torch.long, device=grid.device)
col_position_embeddings = self.col_position_embeddings(col_position_ids)
col_shape = (batch_size, 1, row_width, hidden_dim)
col_position_embeddings = col_position_embeddings.view(*col_shape)
positional_embeddings = row_position_embeddings + col_position_embeddings
if interpolate_pos_encoding and (height > self.max_grid_row_position_embeddings or width > self.max_grid_col_position_embeddings):
grid = grid + self.interpolate_pos_encoding(positional_embeddings, height, width)
else:
grid = grid + positional_embeddings
return grid
def forward(self, grid, interpolate_pos_encoding: bool=False):
"""
Args:
grid: Array of shape (batch_size, num_frames, height, width, num_channels).
It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. Note,
num_frames can be 1
interpolate_pos_encoding: (bool, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
embeddings: The embedding of grid with size (batch_size, height*width, num_channels)
"""
batch_size, num_frames, height, width, num_channels = grid.shape
grid = grid.mean(1)
grid = self.add_2d_positional_embeddings(grid, interpolate_pos_encoding=interpolate_pos_encoding)
visual_tokens = grid.view(batch_size, -1, num_channels)
visual_tokens_shape = visual_tokens.shape[:-1]
device = visual_tokens.device
token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = visual_tokens + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class TvpVisualInputEmbedding(nn.Module):
'''
Takes input of both image and video (multi-frame)
'''
def __init__(self, config):
pass
def interpolate_pos_encoding(self, embedding: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained pad weights , to be able to use the model on collection of high
resolution images (high resolution videos).
'''
pass
def add_2d_positional_embeddings(self, grid, interpolate_pos_encoding: bool=False):
'''
Args:
grid: (batch_size, height, width, hidden_dim)
interpolate_pos_encoding: (`bool`, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)
'''
pass
def forward(self, grid, interpolate_pos_encoding: bool=False):
'''
Args:
grid: Array of shape (batch_size, num_frames, height, width, num_channels).
It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. Note,
num_frames can be 1
interpolate_pos_encoding: (bool, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
embeddings: The embedding of grid with size (batch_size, height*width, num_channels)
'''
pass
| 5 | 4 | 26 | 2 | 15 | 10 | 2 | 0.71 | 1 | 4 | 0 | 0 | 4 | 8 | 4 | 14 | 111 | 12 | 59 | 31 | 54 | 42 | 51 | 31 | 46 | 3 | 1 | 1 | 7
|
5,693
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tvp/processing_tvp.py
|
transformers.models.tvp.processing_tvp.TvpProcessor
|
from ...processing_utils import ProcessingKwargs, ProcessorMixin
class TvpProcessor(ProcessorMixin):
"""
Constructs an TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor.
[`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the
[`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information.
Args:
image_processor ([`TvpImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*):
The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'TvpImageProcessor'
tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
super().__init__(image_processor, tokenizer)
self.video_processor = image_processor
def post_process_video_grounding(self, logits, video_durations):
"""
Compute the time of the video.
Args:
logits (`torch.Tensor`):
The logits output of TvpForVideoGrounding.
video_durations (`float`):
The video's duration.
Returns:
start (`float`):
The start time of the video.
end (`float`):
The end time of the video.
"""
start, end = (round(logits.tolist()[0][0] * video_durations, 1), round(logits.tolist()[0][1] * video_durations, 1))
return (start, end)
|
class TvpProcessor(ProcessorMixin):
'''
Constructs an TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor.
[`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the
[`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information.
Args:
image_processor ([`TvpImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*):
The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
def post_process_video_grounding(self, logits, video_durations):
'''
Compute the time of the video.
Args:
logits (`torch.Tensor`):
The logits output of TvpForVideoGrounding.
video_durations (`float`):
The video's duration.
Returns:
start (`float`):
The start time of the video.
end (`float`):
The end time of the video.
'''
pass
| 3 | 2 | 18 | 2 | 7 | 9 | 2 | 1.35 | 1 | 5 | 1 | 0 | 6 | 0 | 6 | 23 | 131 | 23 | 46 | 18 | 38 | 62 | 33 | 17 | 26 | 4 | 2 | 1 | 11
|
5,694
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/configuration_udop.py
|
transformers.models.udop.configuration_udop.UdopConfig
|
from ...configuration_utils import PretrainedConfig
class UdopConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`UdopForConditionalGeneration`]. It is used to
instantiate a UDOP model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the UDOP
[microsoft/udop-large](https://huggingface.co/microsoft/udop-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 33201):
Vocabulary size of the UDOP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`UdopForConditionalGeneration`].
d_model (`int`, *optional*, defaults to 1024):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 4096):
Size of the intermediate feed forward layer in each `UdopBlock`.
num_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder and decoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder and decoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
relative_bias_args (`list[dict]`, *optional*, defaults to `[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}]`):
A list of dictionaries containing the arguments for the relative bias layers.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. Udopv1.1 uses the
`"gated-gelu"` feed forward projection. Original Udop uses `"relu"`.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model should behave as an encoder/decoder or not.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 1):
The id of the end-of-sequence token in the vocabulary.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum absolute position embeddings for relative position encoding.
image_size (`int`, *optional*, defaults to 224):
The size of the input images.
patch_size (`int`, *optional*, defaults to 16):
The patch size used by the vision encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of channels in the input images.
"""
model_type = 'udop'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, vocab_size=33201, d_model=1024, d_kv=64, d_ff=4096, num_layers=24, num_decoder_layers=None, num_heads=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, relative_bias_args=[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}], dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, max_2d_position_embeddings=1024, image_size=224, patch_size=16, num_channels=3, **kwargs):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
self.max_2d_position_embeddings = max_2d_position_embeddings
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
if not isinstance(relative_bias_args, list):
raise TypeError('`relative_bias_args` should be a list of dictionaries.')
self.relative_bias_args = relative_bias_args
act_info = self.feed_forward_proj.split('-')
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == 'gated'
if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'")
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
|
class UdopConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`UdopForConditionalGeneration`]. It is used to
instantiate a UDOP model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the UDOP
[microsoft/udop-large](https://huggingface.co/microsoft/udop-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 33201):
Vocabulary size of the UDOP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`UdopForConditionalGeneration`].
d_model (`int`, *optional*, defaults to 1024):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 4096):
Size of the intermediate feed forward layer in each `UdopBlock`.
num_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder and decoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder and decoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
relative_bias_args (`list[dict]`, *optional*, defaults to `[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}]`):
A list of dictionaries containing the arguments for the relative bias layers.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. Udopv1.1 uses the
`"gated-gelu"` feed forward projection. Original Udop uses `"relu"`.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model should behave as an encoder/decoder or not.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 1):
The id of the end-of-sequence token in the vocabulary.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum absolute position embeddings for relative position encoding.
image_size (`int`, *optional*, defaults to 224):
The size of the input images.
patch_size (`int`, *optional*, defaults to 16):
The patch size used by the vision encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of channels in the input images.
'''
def __init__(self, vocab_size=33201, d_model=1024, d_kv=64, d_ff=4096, num_layers=24, num_decoder_layers=None, num_heads=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, relative_bias_args=[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}], dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, max_2d_position_embeddings=1024, image_size=224, patch_size=16, num_channels=3, **kwargs):
pass
| 2 | 1 | 69 | 4 | 64 | 2 | 4 | 0.87 | 1 | 4 | 0 | 0 | 1 | 21 | 1 | 1 | 134 | 8 | 68 | 52 | 41 | 59 | 32 | 27 | 30 | 4 | 1 | 1 | 4
|
5,695
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
|
transformers.models.udop.modeling_udop.BaseModelOutputWithAttentionMask
|
from typing import Any, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import ModelOutput, auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro="\n Class for the model's outputs that may also contain a past key/values (to speed up sequential decoding). Includes\n an additional attention mask.\n ")
class BaseModelOutputWithAttentionMask(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only
the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Attention mask used in the model's forward pass to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the
self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks)
that can be used (see `past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
attention_mask: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Class for the model's outputs that may also contain a past key/values (to speed up sequential decoding). Includes\n an additional attention mask.\n ")
class BaseModelOutputWithAttentionMask(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only
the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Attention mask used in the model's forward pass to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the
self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks)
that can be used (see `past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 4.43 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 2 | 7 | 7 | 6 | 31 | 7 | 7 | 6 | 0 | 1 | 0 | 0
|
5,696
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
|
transformers.models.udop.modeling_udop.RelativePositionBias1D
|
from typing import Any, Optional, Union
from torch import Tensor, nn
import torch
class RelativePositionBias1D(RelativePositionBiasBase):
def __init__(self, scaling_factor=1, max_distance=128, **kwargs):
"""
Reimplementation of T5 relative position bias. Distance between given tokens is their distance in the sequence.
Parameters are the same as in base class
"""
super().__init__(scaling_factor=scaling_factor, max_distance=max_distance, **kwargs)
def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
if self.scaling_factor != 1:
raise ValueError('No need to scale 1d features')
relative_position = self.get_relative_position(torch.arange(attention_mask.size(1), dtype=torch.long, device=attention_mask.device)[None, :])
return relative_position
|
class RelativePositionBias1D(RelativePositionBiasBase):
def __init__(self, scaling_factor=1, max_distance=128, **kwargs):
'''
Reimplementation of T5 relative position bias. Distance between given tokens is their distance in the sequence.
Parameters are the same as in base class
'''
pass
def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
pass
| 3 | 1 | 7 | 1 | 5 | 2 | 2 | 0.4 | 1 | 5 | 0 | 0 | 2 | 0 | 2 | 37 | 16 | 2 | 10 | 4 | 7 | 4 | 8 | 4 | 5 | 2 | 5 | 1 | 3
|
5,697
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
|
transformers.models.udop.modeling_udop.RelativePositionBiasAggregated
|
from collections.abc import Sequence
from typing import Any, Optional, Union
from torch import Tensor, nn
class RelativePositionBiasAggregated(nn.Module):
def __init__(self, modules: Sequence[RelativePositionBiasBase]):
"""
Class which sums up various computed biases.
Args:
modules (Sequence[RelativePositionBiasBase]):
List of relative bias modules.
"""
super().__init__()
self.biases = nn.ModuleList(modules)
def forward(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Union[float, Tensor]:
output = 0.0
for bias in self.biases:
output = bias(attention_mask, bbox) + output
return output
|
class RelativePositionBiasAggregated(nn.Module):
def __init__(self, modules: Sequence[RelativePositionBiasBase]):
'''
Class which sums up various computed biases.
Args:
modules (Sequence[RelativePositionBiasBase]):
List of relative bias modules.
'''
pass
def forward(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Union[float, Tensor]:
pass
| 3 | 1 | 9 | 1 | 5 | 4 | 2 | 0.64 | 1 | 6 | 1 | 0 | 2 | 1 | 2 | 12 | 20 | 3 | 11 | 8 | 6 | 7 | 9 | 6 | 6 | 2 | 1 | 1 | 3
|
5,698
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
|
transformers.models.udop.modeling_udop.RelativePositionBiasBase
|
import torch
from abc import ABC, abstractmethod
from typing import Any, Optional, Union
import random
from torch import Tensor, nn
class RelativePositionBiasBase(nn.Module, ABC):
"""
Base class of relative biases.
Args:
num_heads (`int`):
Number of attention heads in the model, it will create embeddings of size `num_heads`, which will be added to the scores of each token pair.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
Pair token metric (distance in the sequence, distance in pixels etc.) will be bucketed, parameter is defining number of such
buckets.
bidirectional (`bool`, *optional*, defaults to `True`):
Whether the distance should be bidirectional for a pair of tokens. If `False`, then distance(tok1, tok2) == distance(tok2, tok1).
scaling_factor (`int`, *optional*, defaults to 1):
Defining factor which will be used to scale relative distance.
max_distance (`int`, *optional*, defaults to 128):
All distances above this value will end up in the one/same bucket.
augmentation (`bool`, *optional*, defaults to `False`):
Whether to multiply relative distances by a random scalar.
expand (`bool`, *optional*, defaults to `False`):
Whether to expand an existing pretrained model with subsequent additions of prefix_bucket.
"""
def __init__(self, num_heads=None, relative_attention_num_buckets=32, bidirectional=True, scaling_factor=1, max_distance=128, level='tokens', augmentation=False, prefix_bucket=False, expand=False):
super().__init__()
self.prefix_bucket = prefix_bucket
self.augmentation = augmentation
self.level = level
self.max_distance = max_distance
self.scaling_factor = scaling_factor
self.bidirectional = bidirectional
self.num_heads = num_heads
self.expand = expand
self.relative_attention_num_buckets = relative_attention_num_buckets
extra_head = 2 if prefix_bucket and (not self.expand) else 0
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets + extra_head, self.num_heads)
@abstractmethod
def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
pass
def get_bucket(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
relative_position = self.prepare_input(attention_mask, bbox)
rp_bucket: Tensor = get_relative_position_bucket(relative_position, bidirectional=self.bidirectional, num_buckets=self.relative_attention_num_buckets, max_distance=self.max_distance)
return rp_bucket
def get_relative_position(self, positions):
context_position = positions[:, :, None]
memory_position = positions[:, None, :]
relative_position = memory_position - context_position
if self.augmentation and self.training:
relative_position *= random.uniform(*AUGMENTATION_RANGE)
relative_position *= self.scaling_factor
return relative_position.to(torch.long)
def forward(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
if self.expand and self.prefix_bucket:
new_bias = nn.Embedding(self.relative_attention_num_buckets + 2, self.num_heads)
new_bias.weight.data[:self.relative_attention_num_buckets] = self.relative_attention_bias.weight.data
new_bias.weight.data[self.relative_attention_num_buckets:] = 0.1
self.relative_attention_bias = new_bias
self.expand = False
rp_bucket = self.get_bucket(attention_mask, bbox)
if self.prefix_bucket:
if rp_bucket.size(0) == 1 and attention_mask.size(0) > 1:
rp_bucket = rp_bucket.repeat(attention_mask.size(0), 1, 1)
is_prefix = bbox[:, :, 1] < 0
num_prefix = is_prefix.sum(-1)
for idx, num_prefix_row in enumerate(num_prefix.cpu().numpy()):
rp_bucket[idx, :num_prefix_row, num_prefix_row:] = self.relative_attention_num_buckets
rp_bucket[idx, num_prefix_row:, :num_prefix_row] = self.relative_attention_num_buckets + 1
values: Tensor = self.relative_attention_bias(rp_bucket)
if values.dim() != 4:
raise ValueError('Wrong dimension of values tensor')
values = values.permute([0, 3, 1, 2])
return values
|
class RelativePositionBiasBase(nn.Module, ABC):
'''
Base class of relative biases.
Args:
num_heads (`int`):
Number of attention heads in the model, it will create embeddings of size `num_heads`, which will be added to the scores of each token pair.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
Pair token metric (distance in the sequence, distance in pixels etc.) will be bucketed, parameter is defining number of such
buckets.
bidirectional (`bool`, *optional*, defaults to `True`):
Whether the distance should be bidirectional for a pair of tokens. If `False`, then distance(tok1, tok2) == distance(tok2, tok1).
scaling_factor (`int`, *optional*, defaults to 1):
Defining factor which will be used to scale relative distance.
max_distance (`int`, *optional*, defaults to 128):
All distances above this value will end up in the one/same bucket.
augmentation (`bool`, *optional*, defaults to `False`):
Whether to multiply relative distances by a random scalar.
expand (`bool`, *optional*, defaults to `False`):
Whether to expand an existing pretrained model with subsequent additions of prefix_bucket.
'''
def __init__(self, num_heads=None, relative_attention_num_buckets=32, bidirectional=True, scaling_factor=1, max_distance=128, level='tokens', augmentation=False, prefix_bucket=False, expand=False):
pass
@abstractmethod
def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
pass
def get_bucket(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
pass
def get_relative_position(self, positions):
pass
def forward(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
pass
| 7 | 1 | 15 | 1 | 14 | 0 | 2 | 0.3 | 2 | 6 | 0 | 3 | 5 | 10 | 5 | 35 | 102 | 11 | 70 | 44 | 48 | 21 | 49 | 28 | 43 | 6 | 4 | 2 | 12
|
5,699
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/udop/modeling_udop.py
|
transformers.models.udop.modeling_udop.RelativePositionBiasHorizontal
|
from torch import Tensor, nn
from typing import Any, Optional, Union
class RelativePositionBiasHorizontal(RelativePositionBiasBase):
def __init__(self, scaling_factor=100, max_distance=100, **kwargs):
"""
Represents in the bucket embeddings horizontal distance between two tokens. Parameters are the same as in base
class
"""
super().__init__(scaling_factor=scaling_factor, max_distance=max_distance, **kwargs)
def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
if not self.scaling_factor > 1.0:
raise ValueError('Need to scale the values of bboxes, as there are in small (0,1) range')
if bbox is None:
raise ValueError('Bbox is required for horizontal relative position bias')
horizontal_position: Tensor = bbox[:, :, [0, 2]].mean(dim=-1)
return self.get_relative_position(horizontal_position)
|
class RelativePositionBiasHorizontal(RelativePositionBiasBase):
def __init__(self, scaling_factor=100, max_distance=100, **kwargs):
'''
Represents in the bucket embeddings horizontal distance between two tokens. Parameters are the same as in base
class
'''
pass
def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[dict[str, Any]]=None) -> Tensor:
pass
| 3 | 1 | 8 | 1 | 5 | 3 | 2 | 0.5 | 1 | 5 | 0 | 0 | 2 | 0 | 2 | 37 | 17 | 2 | 10 | 4 | 7 | 5 | 10 | 4 | 7 | 3 | 5 | 1 | 4
|