Dataset schema (one row per extracted class; observed value and length ranges as reported by the viewer):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | length 7 | length 58 |
| file_path | string | length 9 | length 302 |
| class_name | string | length 5 | length 256 |
| human_written_code | string | length 16 | length 2.16M |
| class_skeleton | string (nullable, ⌀) | length 18 | length 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
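For orientation, a minimal sketch of how rows with this schema could be inspected with the `datasets` library; the dataset identifier used below is a hypothetical placeholder, not the real name of this dataset.

```python
from datasets import load_dataset

# "org/class-metrics" is a hypothetical placeholder identifier.
ds = load_dataset("org/class-metrics", split="train")

row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["class_skeleton"])                       # signatures with bodies stubbed out
print(row["CountLineCode"], row["SumCyclomatic"])  # per-class size and complexity metrics
```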
id: 3,300
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
class_name: transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2VisualBackbone
human_written_code:
```python
import math
from torch import nn
import torch
class LayoutLMv2VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = config.get_detectron2_config()
meta_arch = self.cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
self.backbone = model.backbone
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer('pixel_mean', torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1), persistent=False)
self.register_buffer('pixel_std', torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1), persistent=False)
self.out_feature_key = 'p2'
if torch.are_deterministic_algorithms_enabled():
logger.warning('using `AvgPool2d` instead of `AdaptiveAvgPool2d`')
input_shape = (224, 224)
backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
self.pool = nn.AvgPool2d((math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]), math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1])))
else:
self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
if len(config.image_feature_pool_shape) == 2:
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
def forward(self, images):
images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
return features
def synchronize_batch_norm(self):
if not (torch.distributed.is_available() and torch.distributed.is_initialized() and (torch.distributed.get_rank() > -1)):
raise RuntimeError('Make sure torch.distributed is set up properly.')
self_rank = torch.distributed.get_rank()
node_size = torch.cuda.device_count()
world_size = torch.distributed.get_world_size()
if not world_size % node_size == 0:
raise RuntimeError('Make sure the number of processes can be divided by the number of nodes')
node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)]
sync_bn_groups = [torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)]
node_rank = self_rank // node_size
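# Example with assumed values: 2 nodes of 8 GPUs each gives world_size=16 and node_size=8,
# so node_global_ranks == [[0..7], [8..15]] and rank 11 lands in group 1 (11 // 8 == 1).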
self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
```
class_skeleton:
```python
class LayoutLMv2VisualBackbone(nn.Module):
def __init__(self, config):
pass
def forward(self, images):
pass
def synchronize_batch_norm(self):
pass
```
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=20, AvgCountLineBlank=1, AvgCountLineCode=19,
AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=5,
CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=4,
CountDeclMethod=3, CountDeclMethodAll=13, CountLine=64, CountLineBlank=6, CountLineCode=58, CountLineCodeDecl=21,
CountLineCodeExe=54, CountLineComment=0, CountStmt=40, CountStmtDecl=21, CountStmtExe=36, MaxCyclomatic=3,
MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8
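The deterministic branch of `LayoutLMv2VisualBackbone.__init__` above swaps `AdaptiveAvgPool2d` for a fixed `AvgPool2d` whose kernel is derived from the hard-coded 224x224 input, the backbone stride, and `config.image_feature_pool_shape`. A small sketch of that arithmetic, assuming the usual stride of 4 for the FPN `p2` feature map and a 7x7 pool shape (both values are assumptions, not taken from the row above):

```python
import math

input_shape = (224, 224)
backbone_stride = 4      # assumed stride of the FPN "p2" output
pool_shape = (7, 7)      # assumed config.image_feature_pool_shape[:2]

kernel = tuple(
    math.ceil(math.ceil(side / backbone_stride) / target)
    for side, target in zip(input_shape, pool_shape)
)
# ceil(224 / 4) = 56 feature cells per side, ceil(56 / 7) = 8,
# and AvgPool2d((8, 8)) over a 56x56 map yields the desired 7x7 grid.
print(kernel)  # (8, 8)
```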
id: 3,301
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv2/processing_layoutlmv2.py
class_name: transformers.models.layoutlmv2.processing_layoutlmv2.LayoutLMv2Processor
human_written_code:
```python
from ...processing_utils import ProcessorMixin
import warnings
from ...utils import TensorType
from typing import Optional, Union
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
class LayoutLMv2Processor(ProcessorMixin):
"""
Constructs a LayoutLMv2 processor which combines a LayoutLMv2 image processor and a LayoutLMv2 tokenizer into a
single processor.
[`LayoutLMv2Processor`] offers all the functionalities you need to prepare data for the model.
It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
get words and normalized bounding boxes. These are then provided to [`LayoutLMv2Tokenizer`] or
[`LayoutLMv2TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
into token-level `labels` for token classification tasks (such as FUNSD, CORD).
Args:
image_processor (`LayoutLMv2ImageProcessor`, *optional*):
An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
tokenizer (`LayoutLMv2Tokenizer` or `LayoutLMv2TokenizerFast`, *optional*):
An instance of [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'LayoutLMv2ImageProcessor'
tokenizer_class = ('LayoutLMv2Tokenizer', 'LayoutLMv2TokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=False, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
"""
This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
[`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output,
together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
`False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output, together with resized `images`.
Please refer to the docstring of the above two methods for more information.
"""
if self.image_processor.apply_ocr and boxes is not None:
raise ValueError('You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and word_labels is not None:
raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
features = self.image_processor(images=images, return_tensors=return_tensors)
if text is not None and self.image_processor.apply_ocr and (text_pair is None):
if isinstance(text, str):
text = [text]
text_pair = features['words']
encoded_inputs = self.tokenizer(text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
images = features.pop('pixel_values')
if return_overflowing_tokens is True:
images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
encoded_inputs['image'] = images
return encoded_inputs
def get_overflowing_images(self, images, overflow_to_sample_mapping):
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(images_with_overflow) != len(overflow_to_sample_mapping):
raise ValueError(f'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
return images_with_overflow
@property
def model_input_names(self):
return ['input_ids', 'bbox', 'token_type_ids', 'attention_mask', 'image']
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
```
class_skeleton:
```python
class LayoutLMv2Processor(ProcessorMixin):
'''
Constructs a LayoutLMv2 processor which combines a LayoutLMv2 image processor and a LayoutLMv2 tokenizer into a
single processor.
[`LayoutLMv2Processor`] offers all the functionalities you need to prepare data for the model.
It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
get words and normalized bounding boxes. These are then provided to [`LayoutLMv2Tokenizer`] or
[`LayoutLMv2TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
into token-level `labels` for token classification tasks (such as FUNSD, CORD).
Args:
image_processor (`LayoutLMv2ImageProcessor`, *optional*):
An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
tokenizer (`LayoutLMv2Tokenizer` or `LayoutLMv2TokenizerFast`, *optional*):
An instance of [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]. The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=False, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
'''
This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
[`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output,
together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
`False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output, together with resized `images`.
Please refer to the docstring of the above two methods for more information.
'''
pass
def get_overflowing_images(self, images, overflow_to_sample_mapping):
pass
@property
def model_input_names(self):
pass
@property
def feature_extractor_class(self):
pass
@property
def feature_extractor(self):
pass
```
metrics: total_program_units=10, total_doc_str=2, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=13,
AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.33, CountClassBase=1, CountClassCoupled=8,
CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=8, CountDeclInstanceVariable=0,
CountDeclMethod=8, CountDeclMethodAll=25, CountLine=175, CountLineBlank=24, CountLineCode=114, CountLineCodeDecl=42,
CountLineCodeExe=80, CountLineComment=38, CountStmt=52, CountStmtDecl=17, CountStmtExe=43, MaxCyclomatic=10,
MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=23
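As a usage illustration of the `__call__` flow documented above, a minimal sketch assuming the `microsoft/layoutlmv2-base-uncased` checkpoint, a PIL document image, and a working Tesseract install (the image processor defaults to `apply_ocr=True`); it is a sketch under those assumptions, not a recipe taken from this row:

```python
from PIL import Image
from transformers import LayoutLMv2Processor

processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")

image = Image.open("document.png").convert("RGB")  # any document scan (hypothetical file)

# With apply_ocr=True the image processor runs OCR, then the tokenizer turns the
# recognized words and boxes into token-level tensors; the resized image is kept too.
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, token_type_ids, attention_mask, bbox, image
```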
id: 3,302
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py
class_name: transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer
human_written_code:
```python
import collections
import os
from ...tokenization_utils_base import BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy
from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from typing import Optional, Union
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
class LayoutLMv2Tokenizer(PreTrainedTokenizer):
"""
Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words, word-level
bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and
optional `labels` (for token classification).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
[`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
word-level bounding boxes into token-level bounding boxes.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, model_max_length: int=512, additional_special_tokens: Optional[list[str]]=None, **kwargs):
sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token
if not os.path.isfile(vocab_file):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(token + '\n')
index += 1
return (vocab_file,)
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError('You must provide corresponding bounding boxes')
if is_batched:
if len(words) != len(boxes):
raise ValueError('You must provide words and boxes for an equal amount of examples')
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError('You must provide as many words as there are bounding boxes')
elif len(words) != len(boxes):
raise ValueError('You must provide as many words as there are bounding boxes')
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
batch_outputs = self._batch_prepare_for_model(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose)
return BatchEncoding(batch_outputs)
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
"""
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING)
def encode(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> list[int]:
encoded_inputs = self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
return encoded_inputs['input_ids']
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674')
return self.prepare_for_model(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
"""
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and
*truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
combination of arguments will raise an error.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
tokens = []
pair_tokens = []
token_boxes = []
pair_token_boxes = []
labels = []
if text_pair is None:
if word_labels is None:
for word, box in zip(text, boxes):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
else:
for word, box, label in zip(text, boxes, word_labels):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
if self.only_label_first_subword:
labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
else:
labels.extend([label] * len(word_tokens))
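# Illustration with an assumed WordPiece split: a word tokenized into 3 sub-tokens keeps its
# label on the first sub-token and gets pad_token_label (-100) on the remaining two when
# only_label_first_subword is True; otherwise the label is repeated on all three.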
else:
tokens = self.tokenize(text)
token_boxes = [self.pad_token_box for _ in range(len(tokens))]
for word, box in zip(text_pair, boxes):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
pair_tokens.extend(word_tokens)
pair_token_boxes.extend([box] * len(word_tokens))
ids = self.convert_tokens_to_ids(tokens)
pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (pair_ids is not None):
raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.')
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):
ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels = self.truncate_sequences(ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride)
if return_token_type_ids and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if return_token_type_ids is None:
return_token_type_ids = 'token_type_ids' in self.model_input_names
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
encoded_inputs = {}
if return_overflowing_tokens:
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['overflowing_token_boxes'] = overflowing_token_boxes
encoded_inputs['overflowing_labels'] = overflowing_labels
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
if pair_token_boxes:
pair_token_boxes = pair_token_boxes + [self.sep_token_box]
if labels:
labels = [self.pad_token_label] + labels + [self.pad_token_label]
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
encoded_inputs['input_ids'] = sequence
encoded_inputs['bbox'] = token_boxes + pair_token_boxes
if return_token_type_ids:
encoded_inputs['token_type_ids'] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs['special_tokens_mask'] = [0] * len(sequence)
if labels:
encoded_inputs['labels'] = labels
self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_length:
encoded_inputs['length'] = len(encoded_inputs['input_ids'])
batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
return batch_outputs
def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`List[List[int]]`):
Bounding boxes of the first sequence.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`List[List[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`List[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
"""
if num_tokens_to_remove <= 0:
return (ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [])
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy == TruncationStrategy.ONLY_FIRST or (truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None):
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
overflowing_token_boxes = token_boxes[-window_len:]
overflowing_labels = labels[-window_len:]
ids = ids[:-num_tokens_to_remove]
token_boxes = token_boxes[:-num_tokens_to_remove]
labels = labels[:-num_tokens_to_remove]
else:
error_msg = f'We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a length {len(ids)}. '
if truncation_strategy == TruncationStrategy.ONLY_FIRST:
error_msg = error_msg + f"Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'."
logger.error(error_msg)
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
logger.warning(f"Be aware, overflowing tokens are not returned for the setting you have chosen, i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' truncation strategy. So the returned list will always be empty even if some tokens have been removed.")
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
ids = ids[:-1]
token_boxes = token_boxes[:-1]
labels = labels[:-1]
else:
pair_ids = pair_ids[:-1]
pair_token_boxes = pair_token_boxes[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
overflowing_token_boxes = pair_token_boxes[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
else:
logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.")
return (ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels)
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
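# Rounding example with assumed values: max_length=57 and pad_to_multiple_of=8
# gives (57 // 8 + 1) * 8 = 64.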
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding strategy:' + str(padding_side))
return encoded_inputs
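# Illustrative usage sketch, not part of the original module: it exercises the
# __call__/prepare_for_model path above, which turns word-level boxes and labels
# into token-level `bbox` and `labels`. The checkpoint name is an assumption.
if __name__ == "__main__":
    tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
    words = ["hello", "world"]
    boxes = [[48, 84, 156, 114], [160, 84, 254, 114]]  # normalized to a 0-1000 scale
    encoding = tokenizer(words, boxes=boxes, word_labels=[1, 2])
    print(encoding["input_ids"])  # [CLS] hello world [SEP] as ids
    print(encoding["bbox"])       # cls_token_box, per-token word boxes, sep_token_box
    print(encoding["labels"])     # word label on each first sub-token, -100 elsewhere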
```
class_skeleton:
```python
class LayoutLMv2Tokenizer(PreTrainedTokenizer):
'''
Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words, word-level
bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and
optional `labels` (for token classification).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
[`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
word-level bounding boxes into token-level bounding boxes.
'''
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, model_max_length: int=512, additional_special_tokens: Optional[list[str]]=None, **kwargs):
pass
@property
def do_lower_case(self):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings) into a single string.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
'''
pass
def _is_valid_text_input(t):
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
'''
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
'''
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING)
def encode(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> list[int]:
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated;
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
'''
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
*truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens; such a
combination of arguments will raise an error.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
'''
Truncates a sequence pair in-place following the strategy.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`List[List[int]]`):
Bounding boxes of the first sequence.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`List[List[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`List[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
'''
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length
- PaddingStrategy.DO_NOT_PAD: Do not pad (default)
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
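# --- Hedged usage sketch (added by the editor, not part of the original skeleton) ---
# Illustrates the word-level -> token-level mapping documented above. The checkpoint name
# refers to the public microsoft/layoutlmv2-base-uncased vocabulary; the words, boxes
# (0-1000 normalized) and labels are made-up example values.
from transformers import LayoutLMv2Tokenizer

tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
words = ["hello", "world"]
boxes = [[48, 84, 73, 128], [74, 84, 110, 128]]
word_labels = [1, 2]
encoding = tokenizer(words, boxes=boxes, word_labels=word_labels,
                     padding="max_length", truncation=True, max_length=8)
# `encoding` now holds token-level input_ids, attention_mask, token_type_ids, bbox and
# labels; special tokens and sub-word continuations receive the label -100.
print({k: len(v) for k, v in encoding.items()})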
| 31
| 12
| 49
| 3
| 38
| 8
| 6
| 0.23
| 1
| 17
| 4
| 0
| 22
| 10
| 22
| 111
| 1,163
| 95
| 878
| 328
| 618
| 199
| 303
| 90
| 279
| 30
| 3
| 4
| 127
|
3,303
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py
|
transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast
|
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...tokenization_utils_base import BatchEncoding, EncodedInput, PaddingStrategy, PreTokenizedInput, TensorType, TextInput, TextInputPair, TruncationStrategy
from tokenizers import normalizers
from ...utils import add_end_docstrings, logging
import json
from typing import Optional, Union
from .tokenization_layoutlmv2 import LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv2Tokenizer
class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original LayoutLMv2).
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = LayoutLMv2Tokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, **kwargs):
super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if pre_tok_state.get('lowercase', do_lower_case) != do_lower_case or pre_tok_state.get('strip_accents', strip_accents) != strip_accents:
pre_tok_class = getattr(normalizers, pre_tok_state.pop('type'))
pre_tok_state['lowercase'] = do_lower_case
pre_tok_state['strip_accents'] = strip_accents
self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
self.do_lower_case = do_lower_case
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must be of type `str` (single example) or `List[str]` (batch of examples).')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError('You must provide corresponding bounding boxes')
if is_batched:
if len(words) != len(boxes):
raise ValueError('You must provide words and boxes for an equal amount of examples')
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError('You must provide as many words as there are bounding boxes')
elif len(words) != len(boxes):
raise ValueError('You must provide as many words as there are bounding boxes')
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs)
return encodings[0].tokens
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated;
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')
self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side)
if is_pair:
batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True)
tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings]
sanitized_tokens = {}
for key in tokens_and_encodings[0][0]:
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks['input_ids'])
sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping
for input_ids in sanitized_tokens['input_ids']:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
token_boxes = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
token_boxes_example = []
for id, sequence_id, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if is_pair and sequence_id == 0:
token_boxes_example.append(self.pad_token_box)
else:
token_boxes_example.append(boxes[original_index][word_id])
elif id == self.cls_token_id:
token_boxes_example.append(self.cls_token_box)
elif id == self.sep_token_id:
token_boxes_example.append(self.sep_token_box)
elif id == self.pad_token_id:
token_boxes_example.append(self.pad_token_box)
else:
raise ValueError('Id not recognized')
token_boxes.append(token_boxes_example)
sanitized_tokens['bbox'] = token_boxes
if word_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
labels_example = []
for id, offset, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
else:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens['labels'] = labels
if not return_offsets_mapping:
del sanitized_tokens['offset_mapping']
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus(batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
if return_tensors is None and (not return_overflowing_tokens):
batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings)
self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)
return batched_output
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length
- PaddingStrategy.DO_NOT_PAD: Do not pad (default)
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding strategy: ' + str(padding_side))
return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
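# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Shows how `_batch_encode_plus` above aligns word-level boxes and labels with sub-word
# tokens. The checkpoint name refers to the public microsoft/layoutlmv2-base-uncased
# files; the words, boxes and labels are made-up example values.
if __name__ == "__main__":
    tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
    encoding = tokenizer(
        ["weirdly", "long", "words"],
        boxes=[[10, 10, 60, 30], [65, 10, 90, 30], [95, 10, 140, 30]],
        word_labels=[0, 1, 2],
    )
    # Each word's box is repeated for all of its sub-word tokens in encoding["bbox"],
    # while encoding["labels"] keeps the label only on the first sub-word and uses
    # pad_token_label (-100) elsewhere, because only_label_first_subword defaults to True.
    print(encoding["labels"])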
|
class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original LayoutLMv2).
'''
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, **kwargs):
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
'''
pass
def _is_valid_text_input(t):
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
pass
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated;
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length
- PaddingStrategy.DO_NOT_PAD: Do not pad (default)
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 15
| 5
| 60
| 4
| 46
| 10
| 7
| 0.29
| 1
| 15
| 2
| 0
| 11
| 6
| 11
| 99
| 761
| 60
| 547
| 208
| 386
| 156
| 185
| 60
| 172
| 24
| 3
| 6
| 81
|
3,304
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py
|
transformers.models.layoutlmv3.configuration_layoutlmv3.LayoutLMv3Config
|
from ...configuration_utils import PretrainedConfig
class LayoutLMv3Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an
LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LayoutLMv3
[microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`LayoutLMv3Model`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something
large just in case (e.g., 1024).
coordinate_size (`int`, *optional*, defaults to `128`):
Dimension of the coordinate embeddings.
shape_size (`int`, *optional*, defaults to `128`):
Dimension of the width and height embeddings.
has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a relative attention bias in the self-attention mechanism.
rel_pos_bins (`int`, *optional*, defaults to 32):
The number of relative position bins to be used in the self-attention mechanism.
max_rel_pos (`int`, *optional*, defaults to 128):
The maximum number of relative positions to be used in the self-attention mechanism.
max_rel_2d_pos (`int`, *optional*, defaults to 256):
The maximum number of relative 2D positions in the self-attention mechanism.
rel_2d_pos_bins (`int`, *optional*, defaults to 64):
The number of 2D relative position bins in the self-attention mechanism.
has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a spatial attention bias in the self-attention mechanism.
visual_embed (`bool`, *optional*, defaults to `True`):
Whether or not to add patch embeddings.
input_size (`int`, *optional*, defaults to `224`):
The size (resolution) of the images.
num_channels (`int`, *optional*, defaults to `3`):
The number of channels of the images.
patch_size (`int`, *optional*, defaults to `16`):
The size (resolution) of the patches.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Example:
```python
>>> from transformers import LayoutLMv3Config, LayoutLMv3Model
>>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration
>>> configuration = LayoutLMv3Config()
>>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration
>>> model = LayoutLMv3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'layoutlmv3'
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.has_relative_attention_bias = has_relative_attention_bias
self.rel_pos_bins = rel_pos_bins
self.max_rel_pos = max_rel_pos
self.has_spatial_attention_bias = has_spatial_attention_bias
self.rel_2d_pos_bins = rel_2d_pos_bins
self.max_rel_2d_pos = max_rel_2d_pos
self.text_embed = text_embed
self.visual_embed = visual_embed
self.input_size = input_size
self.num_channels = num_channels
self.patch_size = patch_size
self.classifier_dropout = classifier_dropout
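The constructor above forwards the text-model arguments to the parent config and stores the layout-specific fields (relative/spatial attention bias settings, coordinate sizes, patch parameters) as plain attributes. As a minimal sketch, assuming the `transformers` package is installed, any of these fields can be overridden at construction time while the rest keep the documented defaults:

```python
from transformers import LayoutLMv3Config

# Hypothetical customization: coarser relative-position bucketing and no
# visual patch embeddings. Unspecified arguments keep the defaults above.
config = LayoutLMv3Config(
    rel_pos_bins=16,
    max_rel_pos=64,
    visual_embed=False,
)

print(config.rel_pos_bins)   # 16
print(config.visual_embed)   # False
print(config.hidden_size)    # 768 (default)
```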
|
class LayoutLMv3Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an
LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LayoutLMv3
[microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by
the `input_ids` passed when calling [`LayoutLMv3Model`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something
large just in case (e.g., 1024).
coordinate_size (`int`, *optional*, defaults to `128`):
Dimension of the coordinate embeddings.
shape_size (`int`, *optional*, defaults to `128`):
Dimension of the width and height embeddings.
has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a relative attention bias in the self-attention mechanism.
rel_pos_bins (`int`, *optional*, defaults to 32):
The number of relative position bins to be used in the self-attention mechanism.
max_rel_pos (`int`, *optional*, defaults to 128):
The maximum number of relative positions to be used in the self-attention mechanism.
max_rel_2d_pos (`int`, *optional*, defaults to 256):
The maximum number of relative 2D positions in the self-attention mechanism.
rel_2d_pos_bins (`int`, *optional*, defaults to 64):
The number of 2D relative position bins in the self-attention mechanism.
has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a spatial attention bias in the self-attention mechanism.
visual_embed (`bool`, *optional*, defaults to `True`):
Whether or not to add patch embeddings.
input_size (`int`, *optional*, defaults to `224`):
The size (resolution) of the images.
num_channels (`int`, *optional*, defaults to `3`):
The number of channels of the images.
patch_size (`int`, *optional*, defaults to `16`):
The size (resolution) of the patches.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Example:
```python
>>> from transformers import LayoutLMv3Config, LayoutLMv3Model
>>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration
>>> configuration = LayoutLMv3Config()
>>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration
>>> model = LayoutLMv3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
pass
| 2
| 1
| 67
| 0
| 67
| 0
| 1
| 1.07
| 1
| 1
| 0
| 0
| 1
| 15
| 1
| 1
| 152
| 9
| 69
| 51
| 34
| 74
| 19
| 18
| 17
| 1
| 1
| 0
| 1
|
3,305
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py
|
transformers.models.layoutlmv3.configuration_layoutlmv3.LayoutLMv3OnnxConfig
|
from typing import TYPE_CHECKING, Any
from collections.abc import Mapping
from ...onnx.utils import compute_effective_axis_dimension
from ...onnx import OnnxConfig
from collections import OrderedDict
from packaging import version
class LayoutLMv3OnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.12')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ['question-answering', 'sequence-classification']:
return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
else:
return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'})])
@property
def atol_for_validation(self) -> float:
return 1e-05
@property
def default_onnx_opset(self) -> int:
return 12
def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40) -> Mapping[str, Any]:
"""
Generate inputs to provide to the ONNX exporter
Args:
processor ([`ProcessorMixin`]):
The processor associated with this model configuration.
batch_size (`int`, *optional*, defaults to -1):
The batch size to export the model for (-1 means dynamic axis).
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2).
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 40):
The width of the generated images.
image_height (`int`, *optional*, defaults to 40):
The height of the generated images.
Returns:
Mapping[str, Any]: holding the kwargs to provide to the model's forward function
"""
setattr(processor.image_processor, 'apply_ocr', False)
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
dummy_text = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
inputs = dict(processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors='pt'))
return inputs
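`generate_dummy_inputs` above relies on a processor to turn placeholder text, boxes, and synthetic images into model tensors, after forcing `apply_ocr` off so no OCR engine is needed. A hedged sketch of how it might be driven, assuming the `microsoft/layoutlmv3-base` checkpoint can be downloaded and the vision extras are installed:

```python
from transformers import LayoutLMv3Config, LayoutLMv3Processor
from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")

# Dynamic axes (-1) are resolved to the fixed defaults defined on OnnxConfig.
dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=-1, seq_length=-1)
print({name: tuple(tensor.shape) for name, tensor in dummy_inputs.items()})
```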
|
class LayoutLMv3OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
@property
def default_onnx_opset(self) -> int:
pass
def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40) -> Mapping[str, Any]:
'''
Generate inputs to provide to the ONNX exporter
Args:
processor ([`ProcessorMixin`]):
The processor associated with this model configuration.
batch_size (`int`, *optional*, defaults to -1):
The batch size to export the model for (-1 means dynamic axis).
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2).
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 40):
The width of the generated images.
image_height (`int`, *optional*, defaults to 40):
The height of the generated images.
Returns:
Mapping[str, Any]: holding the kwargs to provide to the model's forward function
'''
pass
| 8
| 1
| 23
| 2
| 14
| 8
| 1
| 0.51
| 1
| 7
| 0
| 0
| 4
| 1
| 4
| 4
| 101
| 12
| 59
| 25
| 41
| 30
| 20
| 11
| 15
| 2
| 1
| 1
| 5
|
3,306
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py
|
transformers.models.layoutlmv3.feature_extraction_layoutlmv3.LayoutLMv3FeatureExtractor
|
import warnings
from ...utils.import_utils import requires
from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
@requires(backends=('vision',))
class LayoutLMv3FeatureExtractor(LayoutLMv3ImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class LayoutLMv3FeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use LayoutLMv3ImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
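The class is a thin deprecation shim: it behaves exactly like `LayoutLMv3ImageProcessor` but emits a `FutureWarning` on instantiation. A small sketch of the migration, assuming both names are importable from the installed `transformers` with the vision backend available:

```python
import warnings

from transformers import LayoutLMv3FeatureExtractor, LayoutLMv3ImageProcessor

# Old name: still works, but warns that it will be removed in v5.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = LayoutLMv3FeatureExtractor(apply_ocr=False)
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True

# New name: same arguments, no warning.
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
```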
|
@requires(backends=('vision',))
class LayoutLMv3FeatureExtractor(LayoutLMv3ImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 24
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
3,307
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py
|
transformers.models.layoutlmv3.image_processing_layoutlmv3.LayoutLMv3ImageProcessor
|
from collections.abc import Iterable
from ...utils import TensorType, filter_out_non_signature_kwargs, is_pytesseract_available, is_vision_available, logging, requires_backends
from ...utils.import_utils import requires
from ...image_transforms import resize, to_channel_dimension_format, to_pil_image
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from typing import Optional, Union
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
import numpy as np
@requires(backends=('vision',))
class LayoutLMv3ImageProcessor(BaseImageProcessor):
"""
Constructs a LayoutLMv3 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be
overridden by `do_resize` in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image's pixel values by the specified `rescale_value`. Can be overridden by
`do_rescale` in `preprocess`.
rescale_factor (`float`, *optional*, defaults to 1 / 255):
Value by which the image's pixel values are rescaled. Can be overridden by `rescale_factor` in
`preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
apply_ocr (`bool`, *optional*, defaults to `True`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
the `apply_ocr` parameter in the `preprocess` method.
ocr_lang (`str`, *optional*):
The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
tesseract_config (`str`, *optional*):
Any additional custom configuration flags that are forwarded to the `config` parameter when calling
Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
`preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_value: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, apply_ocr: bool=True, ocr_lang: Optional[str]=None, tesseract_config: Optional[str]='', **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 224, 'width': 224}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_value
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.apply_ocr = apply_ocr
self.ocr_lang = ocr_lang
self.tesseract_config = tesseract_config
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, apply_ocr: Optional[bool]=None, ocr_lang: Optional[str]=None, tesseract_config: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Desired size of the output image after applying `resize`.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image pixel values between [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to apply to the image pixel values. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `Iterable[float]`, *optional*, defaults to `self.image_mean`):
Mean values to be used for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `Iterable[float]`, *optional*, defaults to `self.image_std`):
Standard deviation values to be used for normalization. Only has an effect if `do_normalize` is set to
`True`.
apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`):
The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
used.
tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`):
Any additional custom configuration flags that are forwarded to the `config` parameter when calling
Tesseract.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if apply_ocr:
requires_backends(self, 'pytesseract')
words_batch = []
boxes_batch = []
for image in images:
words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)
words_batch.append(words)
boxes_batch.append(boxes)
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
if apply_ocr:
data['words'] = words_batch
data['boxes'] = boxes_batch
return data
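A hedged usage sketch of the processor above on an in-memory image, with OCR disabled so that `pytesseract` is not required; only Pillow and numpy are assumed:

```python
import numpy as np
from PIL import Image

from transformers import LayoutLMv3ImageProcessor

# A synthetic 300x200 RGB image stands in for a scanned document page.
page = Image.fromarray(np.random.randint(0, 256, (200, 300, 3), dtype=np.uint8))

image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
batch = image_processor(page, return_tensors="np")

# Images are resized to 224x224, rescaled to [0, 1], normalized,
# and returned channels-first.
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)
```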
|
@requires(backends=('vision',))
class LayoutLMv3ImageProcessor(BaseImageProcessor):
'''
Constructs a LayoutLMv3 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be
overridden by `do_resize` in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image's pixel values by the specified `rescale_value`. Can be overridden by
`do_rescale` in `preprocess`.
rescale_factor (`float`, *optional*, defaults to 1 / 255):
Value by which the image's pixel values are rescaled. Can be overridden by `rescale_factor` in
`preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
apply_ocr (`bool`, *optional*, defaults to `True`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
the `apply_ocr` parameter in the `preprocess` method.
ocr_lang (`str`, *optional*):
The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
tesseract_config (`str`, *optional*):
Any additional custom configuration flags that are forwarded to the `config` parameter when calling
Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
`preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_value: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, apply_ocr: bool=True, ocr_lang: Optional[str]=None, tesseract_config: Optional[str]='', **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, apply_ocr: Optional[bool]=None, ocr_lang: Optional[str]=None, tesseract_config: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Desired size of the output image after applying `resize`.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image pixel values between [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to apply to the image pixel values. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `Iterable[float]`, *optional*, defaults to `self.image_mean`):
Mean values to be used for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `Iterable[float]`, *optional*, defaults to `self.image_std`):
Standard deviation values to be used for normalization. Only has an effect if `do_normalize` is set to
`True`.
apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`):
The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
used.
tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`):
Any additional custom configuration flags that are forwarded to the `config` parameter when calling
Tesseract.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 6
| 3
| 76
| 5
| 45
| 26
| 9
| 0.83
| 1
| 8
| 2
| 1
| 3
| 11
| 3
| 23
| 272
| 20
| 138
| 61
| 94
| 114
| 65
| 21
| 61
| 21
| 3
| 2
| 27
|
3,308
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3Attention
|
import torch.nn as nn
class LayoutLMv3Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv3SelfAttention(config)
self.output = LayoutLMv3SelfOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class LayoutLMv3Attention(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
pass
| 3
| 0
| 12
| 0
| 12
| 1
| 1
| 0.04
| 1
| 3
| 2
| 0
| 2
| 2
| 2
| 12
| 26
| 1
| 25
| 16
| 14
| 1
| 10
| 8
| 7
| 1
| 1
| 0
| 2
|
3,309
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3ClassificationHead
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LayoutLMv3ClassificationHead(nn.Module):
"""
Head for sentence-level classification tasks. Reference: RobertaClassificationHead
"""
def __init__(self, config, pool_feature=False):
super().__init__()
self.pool_feature = pool_feature
if pool_feature:
self.dense = nn.Linear(config.hidden_size * 3, config.hidden_size)
else:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, x):
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
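The head applies dropout, a dense projection, tanh, dropout again, and a final projection to `num_labels`. A self-contained sketch that reproduces this computation on random features (a behavioral re-implementation for illustration, not the library class itself):

```python
import torch
import torch.nn as nn

hidden_size, num_labels, batch_size = 768, 7, 2

# Same layer sequence as LayoutLMv3ClassificationHead with pool_feature=False.
dropout = nn.Dropout(0.1)
dense = nn.Linear(hidden_size, hidden_size)
out_proj = nn.Linear(hidden_size, num_labels)

cls_features = torch.randn(batch_size, hidden_size)  # e.g. the [CLS] hidden state
x = dropout(cls_features)
x = torch.tanh(dense(x))
x = dropout(x)
logits = out_proj(x)
print(logits.shape)  # torch.Size([2, 7])
```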
|
class LayoutLMv3ClassificationHead(nn.Module):
'''
Head for sentence-level classification tasks. Reference: RobertaClassificationHead
'''
def __init__(self, config, pool_feature=False):
pass
def forward(self, x):
pass
| 3
| 1
| 10
| 0
| 10
| 0
| 2
| 0.15
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 25
| 2
| 20
| 8
| 17
| 3
| 17
| 8
| 14
| 3
| 1
| 1
| 4
|
3,310
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3Encoder
|
from ...modeling_outputs import BaseModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
import torch.nn.functional as F
import torch
import math
class LayoutLMv3Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def _cal_1d_pos_emb(self, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = self.relative_position_bucket(rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos)
with torch.no_grad():
rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _cal_2d_pos_emb(self, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = self.relative_position_bucket(rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos)
rel_pos_y = self.relative_position_bucket(rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos)
with torch.no_grad():
rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(self, hidden_states, bbox=None, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, position_ids=None, patch_height=None, patch_width=None):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
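The bucketing in `relative_position_bucket` follows the T5-style scheme: with `bidirectional=True`, half of the buckets are reserved per sign of the offset, small offsets get one bucket each, and larger offsets are binned logarithmically up to `max_distance`. Below is a standalone re-implementation of that method (the logic is copied from the encoder above so the snippet runs without the rest of the model) applied to a toy sequence of positions:

```python
import math
import torch

def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    # Same logic as LayoutLMv3Encoder.relative_position_bucket above.
    ret = 0
    if bidirectional:
        num_buckets //= 2
        ret += (relative_position > 0).long() * num_buckets  # sign goes into the upper half
        n = torch.abs(relative_position)
    else:
        n = torch.max(-relative_position, torch.zeros_like(relative_position))
    max_exact = num_buckets // 2
    is_small = n < max_exact  # small offsets keep one bucket per value
    val_if_large = max_exact + (
        torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
    ).to(torch.long)
    val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
    ret += torch.where(is_small, n, val_if_large)
    return ret

positions = torch.arange(4)
# Pairwise offsets, as built in _cal_1d_pos_emb from position_ids.
rel = positions.unsqueeze(-2) - positions.unsqueeze(-1)
print(relative_position_bucket(rel, num_buckets=32, max_distance=128))
```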
|
class LayoutLMv3Encoder(nn.Module):
def __init__(self, config):
pass
def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
pass
def _cal_1d_pos_emb(self, position_ids):
pass
def _cal_2d_pos_emb(self, bbox):
pass
def forward(self, hidden_states, bbox=None, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, position_ids=None, patch_height=None, patch_width=None):
pass
| 6
| 0
| 30
| 3
| 25
| 2
| 4
| 0.09
| 1
| 6
| 2
| 0
| 5
| 12
| 5
| 15
| 155
| 18
| 126
| 51
| 108
| 11
| 71
| 39
| 65
| 12
| 1
| 2
| 19
|
3,311
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3ForQuestionAnswering
|
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import auto_docstring, logging, torch_int
@auto_docstring
class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv3 = LayoutLMv3Model(config)
self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False)
self.init_weights()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, bbox: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.LongTensor]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
>>> from datasets import load_dataset
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> question = "what's his name?"
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv3(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
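The loss at the end of `forward` is the standard extractive-QA objective: start/end positions that fall outside the sequence are clamped to `ignored_index` and masked out of the cross-entropy, and the start and end losses are averaged. A minimal numeric sketch of that computation, with random logits standing in for the model output:

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_length = 2, 10
start_logits = torch.randn(batch_size, seq_length)
end_logits = torch.randn(batch_size, seq_length)
start_positions = torch.tensor([1, 50])  # 50 is deliberately out of range
end_positions = torch.tensor([3, 60])

# Same clamping and masking as in LayoutLMv3ForQuestionAnswering.forward above.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
print(total_loss)
```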
|
@auto_docstring
class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, bbox: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.LongTensor]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
>>> from datasets import load_dataset
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> question = "what's his name?"
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```'''
pass
| 5
| 1
| 57
| 9
| 32
| 17
| 4
| 0.48
| 1
| 5
| 3
| 0
| 2
| 3
| 2
| 3
| 120
| 18
| 69
| 34
| 47
| 33
| 32
| 16
| 29
| 7
| 2
| 2
| 8
|
3,312
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3ForSequenceClassification
|
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
import torch
from ...utils import auto_docstring, logging, torch_int
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch.nn.functional as F
@auto_docstring(custom_intro='\n LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the\n [CLS] token) e.g. for document image classification tasks such as the\n [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.\n ')
class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.layoutlmv3 = LayoutLMv3Model(config)
self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
self.init_weights()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, bbox: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.LongTensor]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForSequenceClassification
>>> from datasets import load_dataset
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
>>> sequence_label = torch.tensor([1])
>>> outputs = model(**encoding, labels=sequence_label)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv3(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values)
sequence_output = outputs[0][:, 0, :]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
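The loss branch infers `problem_type` from `num_labels` and the label dtype: a single label leads to regression (MSE), integer labels with several classes lead to single-label classification (cross-entropy), and float labels lead to multi-label classification (BCE with logits). A small sketch of the single-label path on random logits:

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels, batch_size = 5, 4
logits = torch.randn(batch_size, num_labels)           # classifier output on the [CLS] token
labels = torch.randint(0, num_labels, (batch_size,))   # integer class ids

# labels.dtype is torch.long and num_labels > 1, so the forward pass above
# would select problem_type = "single_label_classification".
loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss)
```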
|
@auto_docstring(custom_intro='\n LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the\n [CLS] token) e.g. for document image classification tasks such as the\n [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.\n ')
class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, bbox: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.LongTensor]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForSequenceClassification
>>> from datasets import load_dataset
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
>>> sequence_label = torch.tensor([1])
>>> outputs = model(**encoding, labels=sequence_label)
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 5
| 1
| 51
| 7
| 34
| 10
| 7
| 0.27
| 1
| 6
| 3
| 0
| 2
| 4
| 2
| 3
| 107
| 14
| 73
| 30
| 52
| 20
| 33
| 13
| 30
| 12
| 2
| 3
| 13
|
3,313
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3ForTokenClassification
|
import torch.nn as nn
from ...modeling_outputs import BaseModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, logging, torch_int
import torch.nn.functional as F
import torch
from typing import Optional, Union
@auto_docstring(custom_intro='\n LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.\n for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),\n [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and\n [Kleister-NDA](https://github.com/applicaai/kleister-nda).\n ')
class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv3 = LayoutLMv3Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if config.num_labels < 10:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
else:
self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
self.init_weights()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.LongTensor]=None) -> Union[tuple, TokenClassifierOutput]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForTokenClassification
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> word_labels = example["ner_tags"]
>>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
>>> outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv3(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
sequence_output = outputs[0][:, :seq_length]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
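The loss computation above flattens the batch and sequence dimensions before applying cross-entropy, and positions labeled `-100` are ignored. A self-contained sketch of that pattern (shapes are illustrative):

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 7
logits = torch.randn(2, 5, num_labels)         # (batch_size, seq_length, num_labels)
labels = torch.randint(0, num_labels, (2, 5))  # (batch_size, seq_length)
labels[0, -1] = -100                           # -100 is CrossEntropyLoss's default ignore_index

loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss)  # a scalar tensor
```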
|
@auto_docstring(custom_intro='\n LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.\n for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),\n [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and\n [Kleister-NDA](https://github.com/applicaai/kleister-nda).\n ')
class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.LongTensor]=None) -> Union[tuple, TokenClassifierOutput]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForTokenClassification
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> word_labels = example["ner_tags"]
>>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
>>> outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 5 | 1 | 48 | 7 | 30 | 11 | 4 | 0.34 | 1 | 5 | 3 | 0 | 2 | 4 | 2 | 3 | 102 | 15 | 65 | 32 | 44 | 22 | 28 | 15 | 25 | 6 | 2 | 1 | 8 |
3,314 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3Intermediate |
import torch.nn as nn
import torch
from ...activations import ACT2FN
import torch.nn.functional as F
class LayoutLMv3Intermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class LayoutLMv3Intermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
3,315 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3Layer |
from ...modeling_layers import GradientCheckpointingLayer
from ...pytorch_utils import apply_chunking_to_forward
class LayoutLMv3Layer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv3Attention(config)
self.intermediate = LayoutLMv3Intermediate(config)
self.output = LayoutLMv3Output(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
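`apply_chunking_to_forward` runs the feed-forward chunk on slices of the sequence dimension to reduce peak memory; because the feed-forward block acts position-wise, the result is unchanged. A simplified sketch of the idea (not the actual helper implementation):

```python
import torch
import torch.nn as nn

def chunked_feed_forward(ff, hidden_states, chunk_size, seq_dim=1):
    # Apply `ff` to chunk_size-sized slices along the sequence dimension and
    # concatenate the results. Equivalent to ff(hidden_states) for a
    # position-wise ff, but with smaller intermediate activations.
    if chunk_size == 0:
        return ff(hidden_states)
    chunks = hidden_states.split(chunk_size, dim=seq_dim)
    return torch.cat([ff(chunk) for chunk in chunks], dim=seq_dim)

ff = nn.Linear(8, 8)
x = torch.randn(2, 6, 8)
print(torch.allclose(chunked_feed_forward(ff, x, chunk_size=2), ff(x), atol=1e-6))  # True
```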
|
class LayoutLMv3Layer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
pass
def feed_forward_chunk(self, attention_output):
pass
| 4 | 0 | 13 | 1 | 12 | 0 | 1 | 0.03 | 1 | 4 | 3 | 0 | 3 | 5 | 3 | 13 | 41 | 5 | 36 | 23 | 24 | 1 | 19 | 15 | 15 | 1 | 1 | 0 | 3 |
3,316 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3Model |
import torch
import torch.nn.functional as F
from ...utils import auto_docstring, logging, torch_int
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
@auto_docstring
class LayoutLMv3Model(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
if config.text_embed:
self.embeddings = LayoutLMv3TextEmbeddings(config)
if config.visual_embed:
self.patch_embed = LayoutLMv3PatchEmbeddings(config)
size = int(config.input_size / config.patch_size)
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, config.hidden_size))
self.pos_drop = nn.Dropout(p=0.0)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
self.init_visual_bbox(image_size=(size, size))
self.norm = nn.LayerNorm(config.hidden_size, eps=1e-06)
self.encoder = LayoutLMv3Encoder(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def init_visual_bbox(self, image_size=(14, 14), max_len=1000):
"""
Create the bounding boxes for the visual (patch) tokens.
"""
visual_bbox_x = torch.div(torch.arange(0, max_len * (image_size[1] + 1), max_len), image_size[1], rounding_mode='trunc')
visual_bbox_y = torch.div(torch.arange(0, max_len * (image_size[0] + 1), max_len), image_size[0], rounding_mode='trunc')
visual_bbox = torch.stack([visual_bbox_x[:-1].repeat(image_size[0], 1), visual_bbox_y[:-1].repeat(image_size[1], 1).transpose(0, 1), visual_bbox_x[1:].repeat(image_size[0], 1), visual_bbox_y[1:].repeat(image_size[1], 1).transpose(0, 1)], dim=-1).view(-1, 4)
cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]])
self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0)
def calculate_visual_bbox(self, device, dtype, batch_size):
visual_bbox = self.visual_bbox.repeat(batch_size, 1, 1)
visual_bbox = visual_bbox.to(device).type(dtype)
return visual_bbox
def forward_image(self, pixel_values):
embeddings = self.patch_embed(pixel_values)
batch_size, seq_len, _ = embeddings.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if self.pos_embed is not None:
embeddings = embeddings + self.pos_embed
embeddings = self.pos_drop(embeddings)
embeddings = self.norm(embeddings)
return embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `(batch_size, token_sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
token_type_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, token_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif pixel_values is not None:
batch_size = len(pixel_values)
device = pixel_values.device
else:
raise ValueError('You have to specify either input_ids or inputs_embeds or pixel_values')
if input_ids is not None or inputs_embeds is not None:
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
embedding_output = self.embeddings(input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
final_bbox = final_position_ids = None
patch_height = patch_width = None
if pixel_values is not None:
patch_height, patch_width = (torch_int(pixel_values.shape[2] / self.config.patch_size), torch_int(pixel_values.shape[3] / self.config.patch_size))
visual_embeddings = self.forward_image(pixel_values)
visual_attention_mask = torch.ones((batch_size, visual_embeddings.shape[1]), dtype=torch.long, device=device)
if attention_mask is not None:
attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
else:
attention_mask = visual_attention_mask
if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
if self.config.has_spatial_attention_bias:
visual_bbox = self.calculate_visual_bbox(device, dtype=torch.long, batch_size=batch_size)
if bbox is not None:
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
else:
final_bbox = visual_bbox
visual_position_ids = torch.arange(0, visual_embeddings.shape[1], dtype=torch.long, device=device).repeat(batch_size, 1)
if input_ids is not None or inputs_embeds is not None:
position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0)
position_ids = position_ids.expand(input_shape)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
else:
final_position_ids = visual_position_ids
if input_ids is not None or inputs_embeds is not None:
embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)
else:
embedding_output = visual_embeddings
embedding_output = self.LayerNorm(embedding_output)
embedding_output = self.dropout(embedding_output)
elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
if self.config.has_spatial_attention_bias:
final_bbox = bbox
if self.config.has_relative_attention_bias:
position_ids = self.embeddings.position_ids[:, :input_shape[1]]
position_ids = position_ids.expand_as(input_ids)
final_position_ids = position_ids
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, None, device, dtype=embedding_output.dtype)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, bbox=final_bbox, position_ids=final_position_ids, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, patch_height=patch_height, patch_width=patch_width)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
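The `token_sequence_length + patch_sequence_length + 1` bookkeeping mentioned in the docstring follows directly from the input resolution and patch size; a quick numeric check (224 and 16 are the usual defaults, assumed here for illustration):

```python
input_size, patch_size = 224, 16   # assumed LayoutLMv3Config defaults
token_sequence_length = 50         # illustrative text length

patch_sequence_length = (input_size // patch_size) ** 2               # 14 * 14 = 196 patches
sequence_length = token_sequence_length + patch_sequence_length + 1   # +1 for the visual [CLS] token
print(patch_sequence_length, sequence_length)  # 196 247
```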
|
@auto_docstring
class LayoutLMv3Model(LayoutLMv3PreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
def init_visual_bbox(self, image_size=(14, 14), max_len=1000):
'''
Create the bounding boxes for the visual (patch) tokens.
'''
pass
def calculate_visual_bbox(self, device, dtype, batch_size):
pass
def forward_image(self, pixel_values):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `(batch_size, token_sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
token_type_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, token_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
| 11 | 3 | 30 | 4 | 21 | 4 | 4 | 0.19 | 1 | 11 | 4 | 0 | 8 | 11 | 8 | 9 | 249 | 40 | 176 | 59 | 150 | 33 | 107 | 43 | 98 | 22 | 2 | 4 | 34 |
3,317 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3Output |
import torch.nn.functional as F
import torch
import torch.nn as nn
class LayoutLMv3Output(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class LayoutLMv3Output(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,318 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3PatchEmbeddings |
import collections
import torch.nn.functional as F
import torch.nn as nn
class LayoutLMv3PatchEmbeddings(nn.Module):
"""LayoutLMv3 image (patch) embeddings. This class also automatically interpolates the position embeddings for varying
image sizes."""
def __init__(self, config):
super().__init__()
image_size = config.input_size if isinstance(config.input_size, collections.abc.Iterable) else (config.input_size, config.input_size)
patch_size = config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size)
self.patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values, position_embedding=None):
embeddings = self.proj(pixel_values)
if position_embedding is not None:
position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1)
position_embedding = position_embedding.permute(0, 3, 1, 2)
patch_height, patch_width = (embeddings.shape[2], embeddings.shape[3])
position_embedding = F.interpolate(position_embedding, size=(patch_height, patch_width), mode='bicubic')
embeddings = embeddings + position_embedding
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings
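The projection above is a strided convolution whose kernel and stride equal the patch size, so each output position corresponds to one image patch. A standalone shape check (a hidden size of 768 is assumed purely for illustration):

```python
import torch
import torch.nn as nn

proj = nn.Conv2d(3, 768, kernel_size=16, stride=16)  # num_channels -> hidden_size
pixel_values = torch.randn(1, 3, 224, 224)

patches = proj(pixel_values)                     # (1, 768, 14, 14)
embeddings = patches.flatten(2).transpose(1, 2)  # (1, 196, 768): one embedding per 16x16 patch
print(embeddings.shape)  # torch.Size([1, 196, 768])
```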
|
class LayoutLMv3PatchEmbeddings(nn.Module):
'''LayoutLMv3 image (patch) embeddings. This class also automatically interpolates the position embeddings for varying
image sizes.'''
def __init__(self, config):
pass
def forward(self, pixel_values, position_embedding=None):
pass
| 3 | 1 | 14 | 2 | 12 | 1 | 3 | 0.12 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 12 | 33 | 5 | 25 | 9 | 22 | 3 | 17 | 9 | 14 | 3 | 1 | 1 | 5 |
3,319 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3PreTrainedModel |
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging, torch_int
import torch.nn as nn
from .configuration_layoutlmv3 import LayoutLMv3Config
@auto_docstring
class LayoutLMv3PreTrainedModel(PreTrainedModel):
config: LayoutLMv3Config
base_model_prefix = 'layoutlmv3'
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, LayoutLMv3Model):
if self.config.visual_embed:
module.cls_token.data.zero_()
module.pos_embed.data.zero_()
|
@auto_docstring
class LayoutLMv3PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 15 | 0 | 12 | 3 | 6 | 0.47 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 1 | 24 | 2 | 15 | 4 | 13 | 7 | 13 | 4 | 11 | 6 | 1 | 2 | 6 |
3,320 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3SelfAttention |
import torch.nn.functional as F
import math
import torch.nn as nn
import torch
class LayoutLMv3SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
def cogview_attention(self, attention_scores, alpha=32):
"""
https://huggingface.co/papers/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
(PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). The new attention_probs appear to be
slightly slower and to introduce a small bias. Can use torch.allclose(standard_attention_probs,
cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.
"""
scaled_attention_scores = attention_scores / alpha
max_value = scaled_attention_scores.amax(dim=-1).unsqueeze(-1)
new_attention_scores = (scaled_attention_scores - max_value) * alpha
return nn.Softmax(dim=-1)(new_attention_scores)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
if self.has_relative_attention_bias and self.has_spatial_attention_bias:
attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size)
elif self.has_relative_attention_bias:
attention_scores += rel_pos / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = self.cogview_attention(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
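Because the PB-Relax trick only rescales the scores and subtracts a per-row constant before the softmax, it should agree with a plain softmax up to numerical error, as the docstring's `torch.allclose` comparison suggests. A quick check:

```python
import torch
import torch.nn as nn

scores = torch.randn(2, 12, 10, 10)  # (batch, heads, seq, seq)
alpha = 32

standard = nn.Softmax(dim=-1)(scores)

scaled = scores / alpha
pb_relax = nn.Softmax(dim=-1)((scaled - scaled.amax(dim=-1, keepdim=True)) * alpha)

print(torch.allclose(standard, pb_relax, atol=1e-6))  # True
```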
|
class LayoutLMv3SelfAttention(nn.Module):
def __init__(self, config):
pass
def cogview_attention(self, attention_scores, alpha=32):
'''
https://huggingface.co/papers/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
(PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). The new attention_probs appear to be
slightly slower and to introduce a small bias. Can use torch.allclose(standard_attention_probs,
cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.
'''
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
pass
| 4 | 1 | 21 | 4 | 14 | 4 | 3 | 0.27 | 1 | 3 | 0 | 0 | 4 | 9 | 4 | 14 | 88 | 17 | 56 | 35 | 43 | 15 | 44 | 27 | 39 | 6 | 1 | 1 | 10 |
3,321 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3SelfOutput |
import torch.nn as nn
import torch
import torch.nn.functional as F
class LayoutLMv3SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class LayoutLMv3SelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,322 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | transformers.models.layoutlmv3.modeling_layoutlmv3.LayoutLMv3TextEmbeddings |
import torch.nn as nn
import torch
import torch.nn.functional as F
class LayoutLMv3TextEmbeddings(nn.Module):
"""
LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
def calculate_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError('The `bbox` coordinate values should be within 0-1000 range.') from e
h_position_embeddings = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023))
w_position_embeddings = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023))
spatial_position_embeddings = torch.cat([left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings], dim=-1)
return spatial_position_embeddings
def create_position_ids_from_input_ids(self, input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
def forward(self, input_ids=None, bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox)
embeddings = embeddings + spatial_position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
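`create_position_ids_from_input_ids` assigns increasing position ids only to non-padding tokens, while padding positions keep `padding_idx` itself. A small worked example (pad token id 1, the RoBERTa convention, is assumed):

```python
import torch

padding_idx = 1  # assumed RoBERTa-style pad token id
input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # last two positions are padding

mask = input_ids.ne(padding_idx).int()
position_ids = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 5, 1, 1]])
```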
|
class LayoutLMv3TextEmbeddings(nn.Module):
'''
LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.
'''
def __init__(self, config):
pass
def calculate_spatial_position_embeddings(self, bbox):
pass
def create_position_ids_from_input_ids(self, input_ids, padding_idx):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
'''
pass
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
'''
pass
def forward(self, input_ids=None, bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass
| 6 | 3 | 21 | 3 | 16 | 2 | 2 | 0.17 | 1 | 2 | 0 | 0 | 5 | 10 | 5 | 15 | 116 | 19 | 83 | 41 | 70 | 14 | 56 | 33 | 50 | 6 | 1 | 2 | 11 |
3,323 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/processing_layoutlmv3.py | transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from typing import Optional, Union
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
"""
Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
single processor.
[`LayoutLMv3Processor`] offers all the functionalities you need to prepare data for the model.
It first uses [`LayoutLMv3ImageProcessor`] to resize and normalize document images, and optionally applies OCR to
get words and normalized bounding boxes. These are then provided to [`LayoutLMv3Tokenizer`] or
[`LayoutLMv3TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
into token-level `labels` for token classification tasks (such as FUNSD, CORD).
Args:
image_processor (`LayoutLMv3ImageProcessor`, *optional*):
An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input.
tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`, *optional*):
An instance of [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'LayoutLMv3ImageProcessor'
tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
"""
This method first forwards the `images` argument to [`~LayoutLMv3ImageProcessor.__call__`]. In case
[`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output,
together with resized and normalized `pixel_values`. In case [`LayoutLMv3ImageProcessor`] was initialized with
`apply_ocr` set to `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along
with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output, together with
resized and normalized `pixel_values`.
Please refer to the docstring of the above two methods for more information.
"""
if self.image_processor.apply_ocr and boxes is not None:
raise ValueError('You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and word_labels is not None:
raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
features = self.image_processor(images=images, return_tensors=return_tensors)
if text is not None and self.image_processor.apply_ocr and (text_pair is None):
if isinstance(text, str):
text = [text]
text_pair = features['words']
encoded_inputs = self.tokenizer(text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
images = features.pop('pixel_values')
if return_overflowing_tokens is True:
images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
encoded_inputs['pixel_values'] = images
return encoded_inputs
def get_overflowing_images(self, images, overflow_to_sample_mapping):
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(images_with_overflow) != len(overflow_to_sample_mapping):
raise ValueError(f'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
return images_with_overflow
@property
def model_input_names(self):
return ['input_ids', 'bbox', 'attention_mask', 'pixel_values']
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
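`get_overflowing_images` repeats each image once per overflowing chunk produced for its document, using the tokenizer's `overflow_to_sample_mapping`. A minimal illustration with placeholder values:

```python
# Hypothetical example: two pages, where the first page overflowed into two chunks.
images = ["pixel_values_page0", "pixel_values_page1"]
overflow_to_sample_mapping = [0, 0, 1]  # chunk index -> original sample index

images_with_overflow = [images[sample_idx] for sample_idx in overflow_to_sample_mapping]
print(images_with_overflow)  # ['pixel_values_page0', 'pixel_values_page0', 'pixel_values_page1']
```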
|
class LayoutLMv3Processor(ProcessorMixin):
'''
Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
single processor.
[`LayoutLMv3Processor`] offers all the functionalities you need to prepare data for the model.
It first uses [`LayoutLMv3ImageProcessor`] to resize and normalize document images, and optionally applies OCR to
get words and normalized bounding boxes. These are then provided to [`LayoutLMv3Tokenizer`] or
[`LayoutLMv3TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
into token-level `labels` for token classification tasks (such as FUNSD, CORD).
Args:
image_processor (`LayoutLMv3ImageProcessor`, *optional*):
An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input.
tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`, *optional*):
An instance of [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`]. The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
'''
This method first forwards the `images` argument to [`~LayoutLMv3ImageProcessor.__call__`]. In case
[`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output,
together with resized and normalized `pixel_values`. In case [`LayoutLMv3ImageProcessor`] was initialized with
`apply_ocr` set to `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along
with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output, together with
resized and normalized `pixel_values`.
Please refer to the docstring of the above two methods for more information.
'''
pass
def get_overflowing_images(self, images, overflow_to_sample_mapping):
pass
@property
def model_input_names(self):
pass
@property
def feature_extractor_class(self):
pass
@property
def feature_extractor(self):
pass
| 10 | 2 | 17 | 1 | 13 | 3 | 3 | 0.35 | 1 | 8 | 2 | 0 | 8 | 0 | 8 | 25 | 173 | 23 | 112 | 42 | 78 | 39 | 50 | 17 | 41 | 9 | 2 | 2 | 22 |
3,324 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py | transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer |
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from typing import Optional, Union
import json
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
from ...tokenization_utils_base import BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy
import regex as re
import os
class LayoutLMv3Tokenizer(PreTrainedTokenizer):
"""
Construct a LayoutLMv3 tokenizer. Based on [`RobertaTokenizer`] (Byte Pair Encoding or BPE).
[`LayoutLMv3Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to
token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token
classification).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
[`LayoutLMv3Tokenizer`] runs end-to-end tokenization: punctuation splitting and byte-level Byte-Pair Encoding. It also turns the
word-level bounding boxes into token-level bounding boxes.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows treating the leading word just like any
other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask', 'bbox']
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=True, cls_token_box=[0, 0, 0, 0], sep_token_box=[0, 0, 0, 0], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
vocab = dict(self.encoder).copy()
vocab.update(self.added_tokens_encoder)
return vocab
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
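# Illustrative example (token ids are made up; only the special-token layout comes from
# the docstring above): with cls_token_id=0 and sep_token_id=2,
#   build_inputs_with_special_tokens([7, 8])        -> [0, 7, 8, 2]
#   build_inputs_with_special_tokens([7, 8], [9])   -> [0, 7, 8, 2, 2, 9, 2]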
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
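# Illustrative example (lengths assumed): for token_ids_0 of length 2 the mask is
# [1, 0, 0, 1]; for a pair with token_ids_1 of length 1 it is [1, 0, 0, 1, 1, 0, 1],
# marking <s>, the double </s></s> separator and the final </s> as special tokens.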
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())) and (sum([text.startswith(no_split_token) for no_split_token in self.added_tokens_encoder]) == 0):
text = ' ' + text
return (text, kwargs)
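# Descriptive note (added): when `add_prefix_space` (or `is_split_into_words`) is set and
# the text neither starts with whitespace nor with a no-split added token, a single
# leading space is prepended, e.g. "hello world" -> " hello world", so the first word is
# byte-pair encoded the same way as a word inside a sentence.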
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must be of type `str` (single example) or `List[str]` (batch of examples).')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError('You must provide corresponding bounding boxes')
if is_batched:
if len(words) != len(boxes):
raise ValueError('You must provide words and boxes for an equal amount of examples')
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError('You must provide as many words as there are bounding boxes')
elif len(words) != len(boxes):
raise ValueError('You must provide as many words as there are bounding boxes')
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
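# Minimal usage sketch (added; the checkpoint name, words and boxes are illustrative
# assumptions, boxes only need to be normalized to a 0-1000 scale as documented above):
#   tokenizer = LayoutLMv3Tokenizer.from_pretrained("microsoft/layoutlmv3-base")
#   words = ["hello", "world"]
#   boxes = [[1, 8, 12, 20], [3, 74, 45, 85]]
#   encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
#   # encoding holds input_ids, attention_mask and the token-level bbox tensor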
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
batch_outputs = self._batch_prepare_for_model(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose)
return BatchEncoding(batch_outputs)
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING)
def encode(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> list[int]:
encoded_inputs = self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
return encoded_inputs['input_ids']
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
.. warning:: This method is deprecated, `__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674')
return self.prepare_for_model(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
"""
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens. Please note that for a *text_pair* other than `None` and
*truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
combination of arguments will raise an error.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
tokens = []
pair_tokens = []
token_boxes = []
pair_token_boxes = []
labels = []
if text_pair is None:
if word_labels is None:
for word, box in zip(text, boxes):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
else:
for word, box, label in zip(text, boxes, word_labels):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
if self.only_label_first_subword:
labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
else:
labels.extend([label] * len(word_tokens))
else:
tokens = self.tokenize(text)
token_boxes = [self.pad_token_box for _ in range(len(tokens))]
for word, box in zip(text_pair, boxes):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
pair_tokens.extend(word_tokens)
pair_token_boxes.extend([box] * len(word_tokens))
ids = self.convert_tokens_to_ids(tokens)
pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (pair_ids is not None):
raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.')
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):
ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels = self.truncate_sequences(ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride)
if return_token_type_ids and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if return_token_type_ids is None:
return_token_type_ids = 'token_type_ids' in self.model_input_names
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
encoded_inputs = {}
if return_overflowing_tokens:
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['overflowing_token_boxes'] = overflowing_token_boxes
encoded_inputs['overflowing_labels'] = overflowing_labels
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
if pair_token_boxes:
pair_token_boxes = [self.sep_token_box] + pair_token_boxes + [self.sep_token_box]
token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
if labels:
labels = [self.pad_token_label] + labels + [self.pad_token_label]
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
encoded_inputs['input_ids'] = sequence
encoded_inputs['bbox'] = token_boxes
if return_token_type_ids:
encoded_inputs['token_type_ids'] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs['special_tokens_mask'] = [0] * len(sequence)
if labels:
encoded_inputs['labels'] = labels
self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_length:
encoded_inputs['length'] = len(encoded_inputs['input_ids'])
batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
return batch_outputs
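# Worked sketch of the label alignment above (word, sub-token count and label value are
# invented): if a word with label 5 is split into three sub-tokens and
# `only_label_first_subword=True`, its token-level labels become [5, -100, -100]
# (pad_token_label defaults to -100), so the loss ignores the trailing sub-tokens.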
def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`List[List[int]]`):
Bounding boxes of the first sequence.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`List[List[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`List[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
"""
if num_tokens_to_remove <= 0:
return (ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [])
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy == TruncationStrategy.ONLY_FIRST or (truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None):
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
overflowing_token_boxes = token_boxes[-window_len:]
overflowing_labels = labels[-window_len:]
ids = ids[:-num_tokens_to_remove]
token_boxes = token_boxes[:-num_tokens_to_remove]
labels = labels[:-num_tokens_to_remove]
else:
error_msg = f'We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a length {len(ids)}. '
if truncation_strategy == TruncationStrategy.ONLY_FIRST:
error_msg = error_msg + f"Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'."
logger.error(error_msg)
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
logger.warning(f"Be aware, overflowing tokens are not returned for the setting you have chosen, i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' truncation strategy. So the returned list will always be empty even if some tokens have been removed.")
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
ids = ids[:-1]
token_boxes = token_boxes[:-1]
labels = labels[:-1]
else:
pair_ids = pair_ids[:-1]
pair_token_boxes = pair_token_boxes[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
overflowing_token_boxes = pair_token_boxes[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
else:
logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.")
return (ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels)
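# Hedged example of the 'only_second' branch (numbers invented): with 6 pair ids,
# num_tokens_to_remove=2 and stride=1, window_len is min(6, 3) = 3, so the last three
# pair ids (and their boxes) are returned as overflow while pair_ids and
# pair_token_boxes are each shortened by two entries.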
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding side: ' + str(padding_side))
return encoded_inputs
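# Padding sketch (values assumed): right-padding a 3-token example to max_length=5
# appends two pad_token_id entries to input_ids, two pad_token_box entries to bbox,
# two pad_token_label entries to labels (when present), two pad_token_type_id entries
# to token_type_ids and two 0s to attention_mask, mirroring the branches above.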
|
class LayoutLMv3Tokenizer(PreTrainedTokenizer):
'''
Construct a LayoutLMv3 tokenizer. Based on [`RobertaTokenizer`] (Byte Pair Encoding or BPE).
[`LayoutLMv3Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to
token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token
classification).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
[`LayoutLMv3Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
word-level bounding boxes into token-level bounding boxes.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
'''
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=True, cls_token_box=[0, 0, 0, 0], sep_token_box=[0, 0, 0, 0], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def _tokenize(self, text):
'''Tokenize a string.'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
'''
pass
def _is_valid_text_input(t):
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
'''
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
'''
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING)
def encode(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> list[int]:
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Tokenize and prepare for the model a sequence or a pair of sequences.
.. warning:: This method is deprecated, `__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
'''
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens. Please note that for a *text_pair* other than `None` and
*truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
combination of arguments will raise an error.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
'''
Truncates a sequence pair in-place following the strategy.
Args:
ids (`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`List[List[int]]`):
Bounding boxes of the first sequence.
pair_ids (`List[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`List[List[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`List[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
'''
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
| 32
| 14
| 50
| 3
| 39
| 8
| 6
| 0.3
| 1
| 16
| 2
| 0
| 23
| 14
| 23
| 112
| 1,301
| 113
| 925
| 345
| 667
| 274
| 357
| 107
| 332
| 32
| 3
| 4
| 137
|
3,325
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py
|
transformers.models.layoutlmv3.tokenization_layoutlmv3_fast.LayoutLMv3TokenizerFast
|
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import add_end_docstrings, logging
from ...tokenization_utils_base import BatchEncoding, EncodedInput, PaddingStrategy, PreTokenizedInput, TensorType, TextInput, TextInputPair, TruncationStrategy
from .tokenization_layoutlmv3 import LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv3Tokenizer
from tokenizers import processors
import json
from typing import Optional, Union
class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just as any
other word (the RoBERTa tokenizer detects the beginning of words by the preceding space).
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = LayoutLMv3Tokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=True, trim_offsets=True, cls_token_box=[0, 0, 0, 0], sep_token_box=[0, 0, 0, 0], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)
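# Keep the backend post-processor consistent with the `add_prefix_space` and `trim_offsets`
# arguments passed above: its serialized state is inspected and the component is rebuilt
# only if one of the two values actually differs.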
tokenizer_component = 'post_processor'
tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__())
if 'sep' in state:
state['sep'] = tuple(state['sep'])
if 'cls' in state:
state['cls'] = tuple(state['cls'])
changes_to_apply = False
if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
state['add_prefix_space'] = add_prefix_space
changes_to_apply = True
if state.get('trim_offsets', trim_offsets) != trim_offsets:
state['trim_offsets'] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors, state.pop('type'))
new_value = component_class(**state)
setattr(self.backend_tokenizer, tokenizer_component, new_value)
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must be of type `str` (single example) or `List[str]` (batch of examples).')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError('You must provide corresponding bounding boxes')
if is_batched:
if len(words) != len(boxes):
raise ValueError('You must provide words and boxes for an equal amount of examples')
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError('You must provide as many words as there are bounding boxes')
elif len(words) != len(boxes):
raise ValueError('You must provide as many words as there are bounding boxes')
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs)
return encodings[0].tokens
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')
self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side)
if is_pair:
batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True)
tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings]
sanitized_tokens = {}
for key in tokens_and_encodings[0][0]:
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks['input_ids'])
sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping
for input_ids in sanitized_tokens['input_ids']:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
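# Align a bounding box with every token: tokens that come from a word get that word's box
# (for pairs, tokens of the first sequence get pad_token_box), while CLS/SEP/PAD special
# tokens get their dedicated cls_token_box / sep_token_box / pad_token_box.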
token_boxes = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
token_boxes_example = []
for id, sequence_id, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if is_pair and sequence_id == 0:
token_boxes_example.append(self.pad_token_box)
else:
token_boxes_example.append(boxes[original_index][word_id])
elif id == self.cls_token_id:
token_boxes_example.append(self.cls_token_box)
elif id == self.sep_token_id:
token_boxes_example.append(self.sep_token_box)
elif id == self.pad_token_id:
token_boxes_example.append(self.pad_token_box)
else:
raise ValueError('Id not recognized')
token_boxes.append(token_boxes_example)
sanitized_tokens['bbox'] = token_boxes
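# If word-level labels were provided, align them with the tokens as well; when
# only_label_first_subword is set, only the first subword of each word keeps the label and the
# remaining subwords receive pad_token_label (-100), so they are ignored by the loss.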
if word_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
labels_example = []
previous_token_empty = False
for id, offset, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0 and (not previous_token_empty):
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
if offset == (0, 0):
previous_token_empty = True
else:
previous_token_empty = False
else:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens['labels'] = labels
if not return_offsets_mapping:
del sanitized_tokens['offset_mapping']
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus(batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
if return_tensors is None and (not return_overflowing_tokens):
batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings)
self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)
return batched_output
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding side: ' + str(padding_side))
return encoded_inputs
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
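As an illustration of the word/box/label interface documented in `__call__` above, a minimal usage sketch could look as follows (the checkpoint name and the example words, boxes and labels are placeholders, not taken from this file):
from transformers import LayoutLMv3TokenizerFast
# bounding boxes are expected on a 0-1000 normalized scale
words = ["hello", "world"]
boxes = [[10, 10, 110, 40], [120, 10, 230, 40]]
word_labels = [0, 1]
tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=16, truncation=True)
# encoding now holds input_ids, attention_mask, bbox and labels, aligned token by token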
|
class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just as any
other word (the RoBERTa tokenizer detects the beginning of words by the preceding space).
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
'''
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=True, trim_offsets=True, cls_token_box=[0, 0, 0, 0], sep_token_box=[0, 0, 0, 0], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
'''
pass
def _is_valid_text_input(t):
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
pass
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
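The `overflow_to_sample_mapping` produced by `_batch_encode_plus` above maps each overflowing chunk back to its source example. A small hypothetical sketch (helper name and values are illustrative) of how such a mapping can be used to duplicate per-example data, e.g. boxes, labels or images, for every chunk:
def expand_per_sample(values, overflow_to_sample_mapping):
    # `values` has one entry per original example; the result has one entry per produced chunk
    return [values[sample_idx] for sample_idx in overflow_to_sample_mapping]
# e.g. expand_per_sample(["imgA", "imgB"], [0, 0, 1]) == ["imgA", "imgA", "imgB"]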
| 16
| 5
| 60
| 4
| 48
| 9
| 7
| 0.29
| 1
| 15
| 2
| 0
| 11
| 5
| 11
| 99
| 797
| 67
| 568
| 216
| 404
| 164
| 202
| 65
| 189
| 25
| 3
| 6
| 87
|
3,326
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutxlm/processing_layoutxlm.py
|
transformers.models.layoutxlm.processing_layoutxlm.LayoutXLMProcessor
|
from ...utils import TensorType
from ...processing_utils import ProcessorMixin
from typing import Optional, Union
import warnings
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
class LayoutXLMProcessor(ProcessorMixin):
"""
Constructs a LayoutXLM processor which combines a LayoutXLM image processor and a LayoutXLM tokenizer into a single
processor.
[`LayoutXLMProcessor`] offers all the functionalities you need to prepare data for the model.
It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
get words and normalized bounding boxes. These are then provided to [`LayoutXLMTokenizer`] or
[`LayoutXLMTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
into token-level `labels` for token classification tasks (such as FUNSD, CORD).
Args:
image_processor (`LayoutLMv2ImageProcessor`, *optional*):
An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
An instance of [`LayoutXLMTokenizer`] or [`LayoutXLMTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'LayoutLMv2ImageProcessor'
tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
"""
This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
[`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output,
together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
`False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output, together with resized `images`.
Please refer to the docstring of the above two methods for more information.
"""
if self.image_processor.apply_ocr and boxes is not None:
raise ValueError('You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and word_labels is not None:
raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
features = self.image_processor(images=images, return_tensors=return_tensors)
if text is not None and self.image_processor.apply_ocr and (text_pair is None):
if isinstance(text, str):
text = [text]
text_pair = features['words']
encoded_inputs = self.tokenizer(text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
images = features.pop('pixel_values')
if return_overflowing_tokens is True:
images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
encoded_inputs['image'] = images
return encoded_inputs
def get_overflowing_images(self, images, overflow_to_sample_mapping):
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(images_with_overflow) != len(overflow_to_sample_mapping):
raise ValueError(f'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
return images_with_overflow
@property
def model_input_names(self):
return ['input_ids', 'bbox', 'attention_mask', 'image']
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
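A minimal usage sketch of the processor described above, assuming the publicly available microsoft/layoutxlm-base checkpoint and a local document image (both assumptions, not taken from this file):
from PIL import Image
from transformers import LayoutXLMProcessor
processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# with apply_ocr=True (the image processor default), words and boxes are extracted automatically,
# which requires the Tesseract OCR engine / pytesseract to be installed
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
# encoding holds input_ids, attention_mask, bbox and the resized image under the 'image' key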
|
class LayoutXLMProcessor(ProcessorMixin):
'''
Constructs a LayoutXLM processor which combines a LayoutXLM image processor and a LayoutXLM tokenizer into a single
processor.
[`LayoutXLMProcessor`] offers all the functionalities you need to prepare data for the model.
It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
get words and normalized bounding boxes. These are then provided to [`LayoutXLMTokenizer`] or
[`LayoutXLMTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
into token-level `labels` for token classification tasks (such as FUNSD, CORD).
Args:
image_processor (`LayoutLMv2ImageProcessor`, *optional*):
An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
An instance of [`LayoutXLMTokenizer`] or [`LayoutXLMTokenizerFast`]. The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
'''
This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
[`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output,
together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
`False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output, together with resized `images`.
Please refer to the docstring of the above two methods for more information.
'''
pass
def get_overflowing_images(self, images, overflow_to_sample_mapping):
pass
@property
def model_input_names(self):
pass
@property
def feature_extractor_class(self):
pass
@property
def feature_extractor(self):
pass
| 10
| 2
| 18
| 2
| 13
| 3
| 3
| 0.33
| 1
| 8
| 2
| 0
| 8
| 0
| 8
| 25
| 175
| 24
| 114
| 42
| 80
| 38
| 51
| 17
| 42
| 10
| 2
| 2
| 23
|
3,327
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutxlm/tokenization_layoutxlm.py
|
transformers.models.layoutxlm.tokenization_layoutxlm.LayoutXLMTokenizer
|
from ..xlm_roberta.tokenization_xlm_roberta import SPIECE_UNDERLINE, VOCAB_FILES_NAMES
import sentencepiece as spm
from shutil import copyfile
from ...tokenization_utils_base import BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy
from ...utils.import_utils import requires
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import os
from typing import Any, Optional, Union
@requires(backends=('sentencepiece',))
class LayoutXLMTokenizer(PreTrainedTokenizer):
"""
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
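# Mimic the fairseq vocabulary: ids 0-3 are reserved for '<s>', '<pad>', '</s>' and '<unk>',
# so every sentencepiece id is shifted by fairseq_offset and '<mask>' is appended at the end.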
self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
self.fairseq_offset = 1
self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
state['sp_model_proto'] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__.update(d)
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
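# For example (hypothetical ids), token_ids_0=[10, 11] and token_ids_1=[20] give
# [cls, 10, 11, sep, sep, 20, sep], i.e. the `<s> A </s></s> B </s>` pair format described above.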
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
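# Illustrative example (token ids are hypothetical):
#   get_special_tokens_mask([5, 6])      -> [1, 0, 0, 1]
#   get_special_tokens_mask([5, 6], [7]) -> [1, 0, 0, 1, 1, 0, 1]
# where 1 marks an added special token and 0 marks an ordinary sequence token.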
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
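# Illustrative example (token ids are hypothetical):
#   create_token_type_ids_from_sequences([5, 6])      -> [0, 0, 0, 0]
#   create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 0, 0, 0]
# i.e. one zero per position of the sequence built with special tokens.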
@property
def vocab_size(self):
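# `fairseq_offset` accounts for the id shift applied to SentencePiece pieces; the extra
# `+ 1` covers the '<mask>' token registered at the end of the vocabulary in `__init__`.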
return len(self.sp_model) + self.fairseq_offset + 1
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> list[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
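# Illustrative example of the id mapping set up in `__init__`:
#   _convert_token_to_id('<s>')   -> 0   (reserved fairseq id)
#   _convert_token_to_id('<unk>') -> 3   (reserved fairseq id)
#   _convert_token_to_id(piece)   -> sp_model.PieceToId(piece) + fairseq_offset for known pieces
#   _convert_id_to_token(4)       -> sp_model.IdToPiece(3), undoing the fairseq_offset shift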
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
return out_string
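# Illustrative example: SentencePiece marks word starts with the '▁' (U+2581) character,
# so e.g. ['▁Hello', '▁world'] -> 'Hello world'.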
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`list[list[int]]`, `list[list[list[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`list[int]`, `list[list[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must be of type `str` (single example) or `list[str]` (batch of examples).')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('words must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Words must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError('You must provide corresponding bounding boxes')
if is_batched:
if len(words) != len(boxes):
raise ValueError('You must provide words and boxes for an equal amount of examples')
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError('You must provide as many words as there are bounding boxes')
elif len(words) != len(boxes):
raise ValueError('You must provide as many words as there are bounding boxes')
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
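# Usage sketch (file name and values are hypothetical):
#   tokenizer = LayoutXLMTokenizer("sentencepiece.bpe.model")
#   words = ["hello", "world"]
#   boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]   # one 0-1000 normalized box per word
#   encoding = tokenizer(words, boxes=boxes, padding="max_length", max_length=8)
#   # `encoding` then holds 'input_ids', 'attention_mask' and a token-level 'bbox' list.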
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offsets_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
batch_outputs = self._batch_prepare_for_model(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose)
return BatchEncoding(batch_outputs)
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
"""
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
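# Each example is first encoded without padding; padding is applied once over the whole
# batch below, so `padding_strategy` and `pad_to_multiple_of` see every sequence length.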
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offsets_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674')
return self.prepare_for_model(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
"""
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
tokens = []
pair_tokens = []
token_boxes = []
pair_token_boxes = []
labels = []
if text_pair is None:
if word_labels is None:
for word, box in zip(text, boxes):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
else:
for word, box, label in zip(text, boxes, word_labels):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
if self.only_label_first_subword:
labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
else:
labels.extend([label] * len(word_tokens))
else:
tokens = self.tokenize(text)
token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]
for word, box in zip(text_pair, boxes):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
pair_tokens.extend(word_tokens)
pair_token_boxes.extend([box] * len(word_tokens))
ids = self.convert_tokens_to_ids(tokens)
pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):
ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels = self.truncate_sequences(ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride)
if return_token_type_ids and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if return_token_type_ids is None:
return_token_type_ids = 'token_type_ids' in self.model_input_names
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
encoded_inputs = {}
if return_overflowing_tokens:
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['overflowing_token_boxes'] = overflowing_token_boxes
encoded_inputs['overflowing_labels'] = overflowing_labels
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
if pair_token_boxes:
pair_token_boxes = pair_token_boxes + [self.sep_token_box]
if labels:
labels = [self.pad_token_label] + labels + [self.pad_token_label]
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
encoded_inputs['input_ids'] = sequence
encoded_inputs['bbox'] = token_boxes + pair_token_boxes
if return_token_type_ids:
encoded_inputs['token_type_ids'] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs['special_tokens_mask'] = [0] * len(sequence)
if labels:
encoded_inputs['labels'] = labels
self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_length:
encoded_inputs['length'] = len(encoded_inputs['input_ids'])
batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
return batch_outputs
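# Illustrative example (the sub-token split is hypothetical): if the single word "nicely"
# tokenizes into ['▁nice', 'ly'], then with boxes=[[1, 2, 3, 4]] and word_labels=[7] the
# word-level inputs expand to token level roughly as
#   bbox   -> [cls_token_box, [1, 2, 3, 4], [1, 2, 3, 4], sep_token_box]
#   labels -> [pad_token_label, 7, pad_token_label, pad_token_label]
# because `only_label_first_subword=True` keeps the label on the first sub-token only.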
def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`list[list[int]]`):
Bounding boxes of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`list[list[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`list[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens.
"""
if num_tokens_to_remove <= 0:
return (ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [])
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
overflowing_token_boxes = []
overflowing_labels = []
if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
if not overflowing_tokens:
window_len = min(len(ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(ids[-window_len:])
overflowing_token_boxes.extend(token_boxes[-window_len:])
overflowing_labels.extend(labels[-window_len:])
ids = ids[:-1]
token_boxes = token_boxes[:-1]
labels = labels[:-1]
else:
if not overflowing_tokens:
window_len = min(len(pair_ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(pair_ids[-window_len:])
overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
pair_ids = pair_ids[:-1]
pair_token_boxes = pair_token_boxes[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
overflowing_token_boxes = token_boxes[-window_len:]
overflowing_labels = labels[-window_len:]
ids = ids[:-num_tokens_to_remove]
token_boxes = token_boxes[:-num_tokens_to_remove]
labels = labels[:-num_tokens_to_remove]
else:
logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a length {len(ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'.")
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
overflowing_token_boxes = pair_token_boxes[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
else:
logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.")
return (ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels)
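# Illustrative example (token ids are hypothetical): with truncation_strategy='longest_first',
# num_tokens_to_remove=2, stride=0 and a single sequence ids=[5, 6, 7, 8], tokens are dropped
# from the end one at a time, giving ids=[5, 6] and overflowing_tokens=[8, 7]; the matching
# entries of `token_boxes` and `labels` are trimmed and collected in the same way.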
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding side: ' + str(padding_side))
return encoded_inputs
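# Illustrative example (values are hypothetical): padding
#   {'input_ids': [0, 5, 2], 'bbox': [[0, 0, 0, 0], [1, 2, 3, 4], [1000, 1000, 1000, 1000]], 'labels': [-100, 7, -100]}
# to max_length=5 with padding_side='right' appends two `pad_token_id` entries to 'input_ids',
# two `pad_token_box` entries to 'bbox', two `pad_token_label` entries to 'labels', and returns
# an 'attention_mask' of [1, 1, 1, 0, 0].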
|
@requires(backends=('sentencepiece',))
class LayoutXLMTokenizer(PreTrainedTokenizer):
'''
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
'''
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text: str) -> list[str]:
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings for sub-words) into a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`list[list[int]]`, `list[list[list[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`list[int]`, `list[list[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
'''
pass
def _is_valid_text_input(t):
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
'''
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
'''
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
'''
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens.
Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def truncate_sequences(self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]]=None, pair_token_boxes: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
'''
Truncates a sequence pair in-place following the strategy.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
token_boxes (`list[list[int]]`):
Bounding boxes of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_token_boxes (`list[list[int]]`, *optional*):
Bounding boxes of the second sequence.
labels (`list[int]`, *optional*):
Labels of the first sequence (for token classification tasks).
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens.
'''
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
| 27 | 12 | 46 | 4 | 33 | 9 | 6 | 0.35 | 1 | 15 | 2 | 0 | 20 | 11 | 20 | 109 | 1,041 | 113 | 692 | 246 | 512 | 245 | 303 | 86 | 281 | 29 | 3 | 4 | 120 |
3,328 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py | transformers.models.layoutxlm.tokenization_layoutxlm_fast.LayoutXLMTokenizerFast |
from shutil import copyfile
import os
from ...tokenization_utils_base import BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging
from ...tokenization_utils import AddedToken
from ..xlm_roberta.tokenization_xlm_roberta_fast import VOCAB_FILES_NAMES
from typing import Optional, Union
from ...tokenization_utils_fast import PreTrainedTokenizerFast
class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = LayoutXLMTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)
self.vocab_file = vocab_file
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`list[list[int]]`, `list[list[list[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`list[int]`, `list[list[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must be of type `str` (single example) or `list[str]` (batch of examples).')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('words must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Words must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError('You must provide corresponding bounding boxes')
if is_batched:
if len(words) != len(boxes):
raise ValueError('You must provide words and boxes for an equal amount of examples')
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError('You must provide as many words as there are bounding boxes')
elif len(words) != len(boxes):
raise ValueError('You must provide as many words as there are bounding boxes')
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
batched_input = [(text, pair)] if pair else [text]
self._tokenizer.encode_special_tokens = kwargs.pop('split_special_tokens', self._tokenizer.encode_special_tokens)
encodings = self._tokenizer.encode_batch(batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs)
return encodings[0].tokens
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')
self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side)
if is_pair:
batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True)
tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings]
sanitized_tokens = {}
for key in tokens_and_encodings[0][0]:
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks['input_ids'])
sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping
for input_ids in sanitized_tokens['input_ids']:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
token_boxes = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
token_boxes_example = []
for id, sequence_id, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if is_pair and sequence_id == 0:
token_boxes_example.append(self.pad_token_box)
else:
token_boxes_example.append(boxes[original_index][word_id])
elif id == self.cls_token_id:
token_boxes_example.append(self.cls_token_box)
elif id == self.sep_token_id:
token_boxes_example.append(self.sep_token_box)
elif id == self.pad_token_id:
token_boxes_example.append(self.pad_token_box)
else:
raise ValueError('Id not recognized')
token_boxes.append(token_boxes_example)
sanitized_tokens['bbox'] = token_boxes
if word_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
labels_example = []
for id, offset, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
else:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens['labels'] = labels
if not return_offsets_mapping:
del sanitized_tokens['offset_mapping']
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus(batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
if return_tensors is None and (not return_overflowing_tokens):
batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings)
self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)
return batched_output
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'bbox' in encoded_inputs:
encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
                raise ValueError(f'Invalid padding side: {padding_side}')
return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
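
A minimal usage sketch for the fast LayoutXLM tokenizer defined above. It assumes the `microsoft/layoutxlm-base` checkpoint and the `tokenizers`/`sentencepiece` dependencies are available; the words, boxes, and labels are made-up illustrative values, not data from any real document.

```python
from transformers import LayoutXLMTokenizerFast

tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")

words = ["hello", "world"]                            # pretokenized words
boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # one 0-1000 normalized box per word
word_labels = [0, 1]                                  # one integer label per word

encoding = tokenizer(
    words,
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    max_length=16,
    truncation=True,
    return_tensors="pt",
)

# `bbox` mirrors `input_ids`: special tokens receive the cls/sep/pad boxes, and
# subword pieces after the first one of a word receive `pad_token_label` (-100).
print(encoding["input_ids"].shape, encoding["bbox"].shape, encoding["labels"].shape)
```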
|
class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):
pass
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, word_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`list[list[int]]`, `list[list[list[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`list[int]`, `list[list[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
'''
pass
def _is_valid_text_input(t):
pass
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[list[list[list[int]]]]=None, word_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[list[list[int]]]=None, word_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 12 | 5 | 55 | 4 | 41 | 10 | 8 | 0.34 | 1 | 15 | 2 | 0 | 10 | 6 | 10 | 98 | 668 | 70 | 448 | 158 | 335 | 152 | 186 | 57 | 174 | 24 | 3 | 6 | 85 |

3,329 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/led/configuration_led.py | transformers.models.led.configuration_led.LEDConfig |
from typing import Union
from ...configuration_utils import PretrainedConfig
class LEDConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LED
[allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LEDModel`] or [`TFLEDModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_encoder_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that the encoder might ever be used with.
        max_decoder_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that the decoder might ever be used with.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
Example:
```python
>>> from transformers import LEDModel, LEDConfig
>>> # Initializing a LED allenai/led-base-16384 style configuration
>>> configuration = LEDConfig()
>>> # Initializing a model from the allenai/led-base-16384 style configuration
>>> model = LEDModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'led'
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model', 'attention_probs_dropout_prob': 'attention_dropout', 'initializer_range': 'init_std'}
def __init__(self, vocab_size=50265, max_encoder_position_embeddings=16384, max_decoder_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, attention_window: Union[list[int], int]=512, **kwargs):
self.vocab_size = vocab_size
self.max_encoder_position_embeddings = max_encoder_position_embeddings
self.max_decoder_position_embeddings = max_decoder_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.attention_window = attention_window
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
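
A short sketch of instantiating the configuration above. `attention_window` can be a single integer shared by all encoder layers or, as the type hint suggests, a list (normally one value per encoder layer); all numbers here are illustrative, not recommended settings.

```python
from transformers import LEDConfig

# One local attention window shared by every encoder layer.
config_shared = LEDConfig(attention_window=256)

# Or one window per encoder layer (the list length is normally expected to
# match `encoder_layers`); the sizes here are toy values.
config_per_layer = LEDConfig(encoder_layers=6, decoder_layers=6, attention_window=[32, 32, 64, 64, 128, 128])

print(config_shared.attention_window)      # 256
print(config_per_layer.num_hidden_layers)  # 6 (mirrors encoder_layers)
```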
|
class LEDConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LED
[allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LEDModel`] or [`TFLEDModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_encoder_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that the encoder might ever be used with.
        max_decoder_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that the decoder might ever be used with.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
Example:
```python
>>> from transformers import LEDModel, LEDConfig
>>> # Initializing a LED allenai/led-base-16384 style configuration
>>> configuration = LEDConfig()
>>> # Initializing a model from the allenai/led-base-16384 style configuration
>>> model = LEDModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, max_encoder_position_embeddings=16384, max_decoder_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, attention_window: Union[list[int], int]=512, **kwargs):
pass
| 2 | 1 | 59 | 1 | 58 | 0 | 1 | 0.91 | 1 | 2 | 0 | 0 | 1 | 21 | 1 | 1 | 137 | 11 | 66 | 53 | 36 | 60 | 26 | 25 | 24 | 1 | 1 | 0 | 1 |

3,330 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py | transformers.models.led.modeling_led.LEDClassificationHead |
import torch
from torch import nn
class LEDClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
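
A minimal sketch exercising the classification head above on dummy pooled states. It assumes `LEDClassificationHead` is importable from `transformers.models.led.modeling_led`; the dimensions are arbitrary toy values.

```python
import torch

from transformers.models.led.modeling_led import LEDClassificationHead

head = LEDClassificationHead(input_dim=16, inner_dim=16, num_classes=3, pooler_dropout=0.1)
head.eval()  # disable dropout so the pass is deterministic

pooled = torch.randn(2, 16)  # (batch, input_dim), e.g. the EOS hidden state per sequence
logits = head(pooled)        # dropout -> dense -> tanh -> dropout -> out_proj
print(logits.shape)          # torch.Size([2, 3])
```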
|
class LEDClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3 | 1 | 9 | 0 | 9 | 0 | 1 | 0.05 | 1 | 4 | 0 | 0 | 2 | 3 | 2 | 12 | 22 | 2 | 19 | 12 | 10 | 1 | 13 | 6 | 10 | 1 | 1 | 0 | 2 |

3,331 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py | transformers.models.led.modeling_led.LEDDecoder |
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from .configuration_led import LEDConfig
from typing import Optional, Union
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask
class LEDDecoder(LEDPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`LEDDecoderLayer`]
Args:
config: LEDConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding]=None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_decoder_position_embeddings
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = LEDLearnedPositionalEmbedding(self.max_target_positions, config.d_model)
self.layers = nn.ModuleList([LEDDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, input_ids=None, attention_mask=None, global_attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention. Tokens with
global attention attends to all other tokens, and all other tokens attend to them. This is important
for task-specific finetuning because it makes the model more flexible at representing the task. For
example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _create_4d_causal_attention_mask(input_shape, inputs_embeds.dtype, inputs_embeds.device, past_key_values_length=past_key_values_length)
if attention_mask is not None and combined_attention_mask is not None:
combined_attention_mask = combined_attention_mask + _prepare_4d_attention_mask_inverted(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask_inverted(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.')
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, combined_attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
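
A rough sketch of running the decoder stack above in isolation with a tiny, randomly initialized configuration. It assumes `LEDDecoder` is importable from `transformers.models.led.modeling_led`; every size below is a toy value chosen only to keep the example fast.

```python
import torch

from transformers import LEDConfig
from transformers.models.led.modeling_led import LEDDecoder

config = LEDConfig(
    vocab_size=128,
    d_model=32,
    decoder_layers=2,
    decoder_attention_heads=4,
    decoder_ffn_dim=64,
    encoder_layers=2,
    encoder_attention_heads=4,
    encoder_ffn_dim=64,
    max_decoder_position_embeddings=64,
)
decoder = LEDDecoder(config)
decoder.eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))     # (batch, tgt_len)
encoder_hidden_states = torch.randn(1, 10, config.d_model)  # stand-in for encoder output

with torch.no_grad():
    outputs = decoder(
        input_ids=input_ids,
        encoder_hidden_states=encoder_hidden_states,
        use_cache=False,  # keep the sketch free of KV-cache bookkeeping
        return_dict=True,
    )

print(outputs.last_hidden_state.shape)  # torch.Size([1, 8, 32])
```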
|
class LEDDecoder(LEDPreTrainedModel):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`LEDDecoderLayer`]
Args:
config: LEDConfig
embed_tokens (nn.Embedding): output embedding
'''
def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding]=None):
pass
def forward(self, input_ids=None, attention_mask=None, global_attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention. Tokens with
global attention attends to all other tokens, and all other tokens attend to them. This is important
for task-specific finetuning because it makes the model more flexible at representing the task. For
example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3 | 2 | 126 | 18 | 72 | 37 | 20 | 0.56 | 1 | 10 | 4 | 0 | 2 | 9 | 2 | 4 | 262 | 38 | 144 | 42 | 126 | 80 | 75 | 27 | 72 | 38 | 2 | 3 | 40 |

3,332 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py | transformers.models.led.modeling_led.LEDDecoderAttention |
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
class LEDDecoderAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped, past_key_values)
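
A small sketch of the self-attention path of the module above (no cache, no cross-attention), following the `Batch x Time x Channel` input convention noted in its docstring. The dimensions are toy values, and the import path simply points at the module the class is defined in.

```python
import torch

from transformers.models.led.modeling_led import LEDDecoderAttention

attn = LEDDecoderAttention(embed_dim=32, num_heads=4, dropout=0.0, is_decoder=True, layer_idx=0)
attn.eval()

hidden_states = torch.randn(2, 5, 32)  # (batch, tgt_len, embed_dim)
attn_output, attn_weights, cache = attn(hidden_states, output_attentions=True)

print(attn_output.shape)   # torch.Size([2, 5, 32])
print(attn_weights.shape)  # torch.Size([2, 4, 5, 5]) -- (batch, heads, tgt_len, src_len)
print(cache)               # None, since no past_key_values were passed in
```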
|
class LEDDecoderAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
    def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4 | 2 | 45 | 5 | 33 | 7 | 5 | 0.21 | 1 | 6 | 0 | 0 | 3 | 10 | 3 | 13 | 141 | 19 | 101 | 40 | 82 | 21 | 64 | 25 | 60 | 12 | 1 | 2 | 15 |

3,333 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py | transformers.models.led.modeling_led.LEDDecoderLayer |
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_led import LEDConfig
from ...activations import ACT2FN
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
class LEDDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: LEDConfig, layer_idx=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = LEDDecoderAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = LEDDecoderAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(decoder_attention_heads,)*.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size *(decoder_attention_heads,)*.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`): Whether the base model outputs attentions.
This requires the attentions tensor to be reshaped in this function.
"""
residual = hidden_states
hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (past_key_values,)
return outputs
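# --- Illustrative sketch, not part of the original class ---
# The docstring above expects additive attention masks of shape (batch, 1, tgt_len, src_len)
# where blocked positions hold a very large negative value. A minimal way to build a combined
# causal + padding mask with assumed toy sizes (a sketch only, not the helper the model uses):
import torch
batch, tgt_len = 2, 4
padding_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 1 = real token, 0 = padding
causal_bool = torch.triu(torch.ones(tgt_len, tgt_len, dtype=torch.bool), diagonal=1)
blocked = causal_bool[None, None, :, :] | (padding_mask[:, None, None, :] == 0)
min_value = torch.finfo(torch.float32).min
attention_mask = torch.zeros(batch, 1, tgt_len, tgt_len).masked_fill(blocked, min_value)  # (batch, 1, tgt, src)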
|
class LEDDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: LEDConfig, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(decoder_attention_heads,)*.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size *(decoder_attention_heads,)*.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`): Whether the base model outputs attentions.
This requires the attentions tensor to be reshaped in this function.
'''
pass
| 4
| 1
| 56
| 6
| 39
| 12
| 4
| 0.31
| 1
| 5
| 2
| 0
| 2
| 11
| 2
| 12
| 114
| 12
| 78
| 32
| 64
| 24
| 44
| 21
| 41
| 6
| 1
| 1
| 7
|
3,334
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDEncoder
|
from .configuration_led import LEDConfig
import torch
from typing import Optional, Union
from torch import nn
class LEDEncoder(LEDPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is a
[`LEDEncoderLayer`].
Args:
config: LEDConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding]=None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_encoder_position_embeddings
if isinstance(config.attention_window, int):
if config.attention_window % 2 != 0:
raise ValueError('`config.attention_window` has to be an even value')
if config.attention_window <= 0:
raise ValueError('`config.attention_window` has to be positive')
config.attention_window = [config.attention_window] * config.num_hidden_layers
elif len(config.attention_window) != config.num_hidden_layers:
raise ValueError(f'`len(config.attention_window)` should equal `config.num_hidden_layers`. Expected {config.num_hidden_layers}, given {len(config.attention_window)}')
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = LEDLearnedPositionalEmbedding(self.max_source_positions, embed_dim)
self.layers = nn.ModuleList([LEDEncoderLayer(config, i) for i in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.gradient_checkpointing = False
self.post_init()
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
attention_mask = global_attention_mask + 1
return attention_mask
def _pad_to_window_size(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int):
"""A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
attention_window = self.config.attention_window if isinstance(self.config.attention_window, int) else max(self.config.attention_window)
if attention_window % 2 != 0:
raise ValueError(f'`attention_window` should be an even value. Given {attention_window}')
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
if padding_len > 0:
logger.warning_once(f'Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of `config.attention_window`: {attention_window}')
if input_ids is not None:
input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full((batch_size, padding_len), self.config.pad_token_id, dtype=torch.long)
inputs_embeds_padding = self.embed_tokens(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = nn.functional.pad(attention_mask, (0, padding_len), value=False)
return (padding_len, input_ids, attention_mask, inputs_embeds)
def forward(self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the
task. For example, for classification, the <s> token should be given global attention. For QA, all
question tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is None and inputs_embeds is None:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if attention_mask is None:
attention_mask = torch.ones(inputs_embeds.size()[:-1], device=inputs_embeds.device, dtype=torch.long)
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
padding_len, input_ids, attention_mask, inputs_embeds = self._pad_to_window_size(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id)
if input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask_inverted(attention_mask, inputs_embeds.dtype)[:, 0, 0, :]
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_global_attentions = () if output_attentions and is_global_attn else None
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
dropout_probability = torch.rand([])
if self.training and dropout_probability < self.layerdrop:
layer_outputs = (None, None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
if is_global_attn:
all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if padding_len > 0:
hidden_states = hidden_states[:, :-padding_len]
if output_hidden_states:
encoder_states = tuple((state[:, :-padding_len] for state in encoder_states))
if output_attentions:
all_attentions = tuple((state[:, :, :-padding_len, :] for state in all_attentions))
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions, all_global_attentions] if v is not None))
return LEDEncoderBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions, global_attentions=all_global_attentions)
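# --- Illustrative sketch, not part of the original class ---
# Two details of the encoder above, checked on assumed toy inputs:
# 1) _merge_to_attention_mask: attention_mask * (global_attention_mask + 1) yields
#    0 for padding, 1 for local attention and 2 for global attention.
# 2) _pad_to_window_size: the sequence is padded up to a multiple of attention_window.
import torch
attention_mask = torch.tensor([[1, 1, 1, 0]])         # last position is padding
global_attention_mask = torch.tensor([[1, 0, 0, 0]])  # global attention on the first token
merged = attention_mask * (global_attention_mask + 1)
# merged == tensor([[2, 1, 1, 0]])
attention_window, seq_len = 512, 4097                 # assumed values
padding_len = (attention_window - seq_len % attention_window) % attention_window
# padding_len == 511, so the padded length 4608 is a multiple of 512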
|
class LEDEncoder(LEDPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is a
[`LEDEncoderLayer`].
Args:
config: LEDConfig
embed_tokens (nn.Embedding): output embedding
'''
def __init__(self, config: LEDConfig, embed_tokens: Optional[nn.Embedding]=None):
pass
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
pass
def _pad_to_window_size(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int):
'''A helper function to pad tokens and mask to work with implementation of Longformer self-attention.'''
pass
def forward(self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the
task. For example, for classification, the <s> token should be given global attention. For QA, all
question tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 5
| 3
| 69
| 9
| 45
| 16
| 11
| 0.39
| 1
| 11
| 4
| 0
| 4
| 9
| 4
| 6
| 288
| 40
| 180
| 50
| 159
| 71
| 104
| 34
| 99
| 30
| 2
| 3
| 45
|
3,335
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDEncoderAttention
|
import torch
from typing import Optional, Union
from torch import nn
class LEDEncoderAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
self.longformer_self_attn = LEDEncoderSelfAttention(config, layer_id=layer_id)
self.output = nn.Linear(config.d_model, config.d_model)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, is_index_masked: Optional[torch.Tensor]=None, is_index_global_attn: Optional[torch.Tensor]=None, is_global_attn: Optional[bool]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
self_outputs = self.longformer_self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions)
attn_output = self.output(self_outputs[0])
outputs = (attn_output,) + self_outputs[1:]
return outputs
|
class LEDEncoderAttention(nn.Module):
def __init__(self, config, layer_id):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, is_index_masked: Optional[torch.Tensor]=None, is_index_global_attn: Optional[torch.Tensor]=None, is_global_attn: Optional[bool]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 1
| 15
| 2
| 13
| 1
| 1
| 0.04
| 1
| 4
| 1
| 0
| 2
| 2
| 2
| 12
| 32
| 4
| 27
| 17
| 15
| 1
| 10
| 8
| 7
| 1
| 1
| 0
| 2
|
3,336
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDEncoderBaseModelOutput
|
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Base class for LEDEncoder's outputs, with potential hidden states, local and global attentions.\n ")
class LEDEncoderBaseModelOutput(ModelOutput):
"""
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
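# --- Illustrative note, not part of the original dataclass ---
# The docstring above fixes the per-layer shapes of the two attention outputs. With assumed
# sizes batch_size=2, num_heads=12, sequence_length=1024, attention_window=512 and x=1
# token carrying global attention, each layer would contribute tensors of shape:
x, attention_window = 1, 512
local_attn_shape = (2, 12, 1024, x + attention_window + 1)  # attentions[i]
global_attn_shape = (2, 12, 1024, x)                        # global_attentions[i]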
|
@dataclass
@auto_docstring(custom_intro="\n Base class for LEDEncoder's outputs, with potential hidden states, local and global attentions.\n ")
class LEDEncoderBaseModelOutput(ModelOutput):
'''
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 5
| 5
| 4
| 4
| 30
| 5
| 4
| 4
| 0
| 1
| 0
| 0
|
3,337
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDEncoderLayer
|
from torch import nn
import torch
from .configuration_led import LEDConfig
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
class LEDEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: LEDConfig, layer_id: int):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = LEDEncoderAttention(config, layer_id)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(encoder_attention_heads,)*.
"""
residual = hidden_states
attn_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions)
hidden_states = attn_outputs[0]
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return (hidden_states,) + attn_outputs[1:]
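# --- Illustrative sketch, not part of the original class ---
# The final clamp above guards fp16 activations against overflow: if any value is inf/nan,
# everything is clamped to just below the fp16 maximum. The same guard on a toy tensor:
import torch
hidden_states = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# hidden_states is now finite, clamped close to the fp16 maximum (65504 - 1000)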
|
class LEDEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: LEDConfig, layer_id: int):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(encoder_attention_heads,)*.
'''
pass
| 3
| 1
| 29
| 1
| 24
| 4
| 2
| 0.16
| 1
| 5
| 2
| 0
| 2
| 9
| 2
| 12
| 60
| 3
| 49
| 24
| 37
| 8
| 30
| 15
| 27
| 2
| 1
| 1
| 3
|
3,338
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDEncoderSelfAttention
|
import math
import torch
from torch import nn
class LEDEncoderSelfAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_heads = config.num_attention_heads
self.head_dim = int(config.hidden_size / config.num_attention_heads)
self.embed_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.embed_dim)
self.key = nn.Linear(config.hidden_size, self.embed_dim)
self.value = nn.Linear(config.hidden_size, self.embed_dim)
self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
self.dropout = config.attention_probs_dropout_prob
self.layer_id = layer_id
attention_window = config.attention_window[self.layer_id]
assert attention_window % 2 == 0, f'`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}'
assert attention_window > 0, f'`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}'
self.one_sided_attn_window_size = attention_window // 2
self.config = config
def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False):
"""
[`LEDEncoderSelfAttention`] expects *len(hidden_states)* to be multiple of *attention_window*. Padding to
*attention_window* happens in [`LEDEncoderModel.forward`] to avoid redoing the padding on each layer.
The *attention_mask* is changed in [`LEDEncoderModel.forward`] from 0, 1, 2 to:
- -10000: no attention
- 0: local attention
- +10000: global attention
"""
hidden_states = hidden_states.transpose(0, 1)
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert embed_dim == self.embed_dim, f'hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}'
query_vectors /= math.sqrt(self.head_dim)
query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
attn_scores = self._sliding_chunks_query_key_matmul(query_vectors, key_vectors, self.one_sided_attn_window_size)
remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min)
diagonal_mask = self._sliding_chunks_query_key_matmul(float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size)
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], f'local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}'
if is_global_attn:
max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero = self._get_global_attn_indices(is_index_global_attn)
global_key_attn_scores = self._concat_with_global_key_attn_probs(query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero)
attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
del global_key_attn_scores
attn_probs = nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32)
if layer_head_mask is not None:
assert layer_head_mask.size() == (self.num_heads,), f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}'
attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
attn_probs = attn_probs.type_as(attn_scores)
del attn_scores
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
if is_global_attn:
attn_output = self._compute_attn_output_with_global_indices(value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero)
else:
attn_output = self._sliding_chunks_matmul_attn_probs_value(attn_probs, value_vectors, self.one_sided_attn_window_size)
assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), 'Unexpected size'
attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
if is_global_attn:
global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, layer_head_mask=layer_head_mask, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked)
nonzero_global_attn_output = global_attn_output[is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]]
attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(len(is_local_index_global_attn_nonzero[0]), -1)
attn_probs[is_index_global_attn_nonzero] = 0
outputs = (attn_output.transpose(0, 1),)
if output_attentions:
outputs += (attn_probs,)
return outputs + (global_attn_probs,) if is_global_attn and output_attentions else outputs
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = nn.functional.pad(hidden_states_padded, padding)
hidden_states_padded = hidden_states_padded.view(*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2))
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""
shift every row 1 step right, converting columns into diagonals.
Example:
```python
chunked_hidden_states: [
0.4983,
2.6918,
-0.0071,
1.0492,
-1.8348,
0.7672,
0.2986,
0.0285,
-0.7584,
0.4206,
-0.0405,
0.1599,
2.0514,
-1.1600,
0.5372,
0.2629,
]
window_overlap = num_rows = 4
```
(pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206,
-0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = nn.functional.pad(chunked_hidden_states, (0, window_overlap + 1))
chunked_hidden_states = chunked_hidden_states.view(total_num_heads, num_chunks, -1)
chunked_hidden_states = chunked_hidden_states[:, :, :-window_overlap]
chunked_hidden_states = chunked_hidden_states.view(total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap, onnx_export: bool=False):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
if not onnx_export:
hidden_states = hidden_states.view(hidden_states.size(0), torch.div(hidden_states.size(1), window_overlap * 2, rounding_mode='trunc'), window_overlap * 2, hidden_states.size(2))
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hidden_states.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
chunk_size = [hidden_states.size(0), torch.div(hidden_states.size(1), window_overlap, rounding_mode='trunc') - 1, window_overlap * 2, hidden_states.size(2)]
overlapping_chunks = torch.empty(chunk_size, device=hidden_states.device)
for chunk in range(chunk_size[1]):
overlapping_chunks[:, chunk, :, :] = hidden_states[:, chunk * window_overlap:chunk * window_overlap + 2 * window_overlap, :]
return overlapping_chunks
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor:
beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, :affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
input_tensor[:, :affected_seq_len, :, :affected_seq_len + 1] = torch.full_like(beginning_input, -float('inf')).where(beginning_mask.bool(), beginning_input)
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):]
ending_mask = ending_mask.expand(ending_input.size())
input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] = torch.full_like(ending_input, -float('inf')).where(ending_mask.bool(), ending_input)
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""
Matrix multiplication of query and key tensors using with a sliding window attention pattern. This
implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained LEDEncoder) with an
overlap of size window_overlap
"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert seq_len % (window_overlap * 2) == 0, f'Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}'
assert query.size() == key.size()
chunks_count = torch.div(seq_len, window_overlap, rounding_mode='trunc') - 1
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
query = self._chunk(query, window_overlap, getattr(self.config, 'onnx_export', False))
key = self._chunk(key, window_overlap, getattr(self.config, 'onnx_export', False))
diagonal_chunked_attention_scores = torch.einsum('bcxd,bcyd->bcxy', (query, key))
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(diagonal_chunked_attention_scores, padding=(0, 0, 0, 1))
diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros((batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1))
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[:, :, :window_overlap, :window_overlap + 1]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[:, -1, window_overlap:, :window_overlap + 1]
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[:, :, -(window_overlap + 1):-1, window_overlap + 1:]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[:, 0, :window_overlap - 1, 1 - window_overlap:]
diagonal_attention_scores = diagonal_attention_scores.view(batch_size, num_heads, seq_len, 2 * window_overlap + 1).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
def _sliding_chunks_matmul_attn_probs_value(self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int):
"""
Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
same shape as `attn_probs`
"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = torch.div(seq_len, window_overlap, rounding_mode='trunc') - 1
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(batch_size * num_heads, torch.div(seq_len, window_overlap, rounding_mode='trunc'), window_overlap, 2 * window_overlap + 1)
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2])
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum('bcwd,bcdh->bcwh', (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
"""compute global attn indices required throughout forward pass"""
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
max_num_global_attn_indices = num_global_attn_indices.max()
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
is_local_index_global_attn = torch.arange(max_num_global_attn_indices, device=is_index_global_attn.device) < num_global_attn_indices.unsqueeze(dim=-1)
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero)
def _concat_with_global_key_attn_probs(self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero):
batch_size = key_vectors.shape[0]
key_vectors_only_global = key_vectors.new_zeros(batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
attn_probs_from_global_key = torch.einsum('blhd,bshd->blhs', (query_vectors, key_vectors_only_global))
attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
attn_probs_from_global_key[is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :] = torch.finfo(attn_probs_from_global_key.dtype).min
attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero):
batch_size = attn_probs.shape[0]
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
value_vectors_only_global = value_vectors.new_zeros(batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
attn_output_only_global = torch.matmul(attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone()).transpose(1, 2)
attn_probs_without_global = attn_probs.narrow(-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices).contiguous()
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(attn_probs_without_global, value_vectors, self.one_sided_attn_window_size)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(self, hidden_states, max_num_global_attn_indices, layer_head_mask, is_local_index_global_attn_nonzero, is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, is_index_masked):
seq_len, batch_size = hidden_states.shape[:2]
global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[is_index_global_attn_nonzero[::-1]]
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hidden_states)
global_value_vectors = self.value_global(hidden_states)
global_query_vectors_only_global /= math.sqrt(self.head_dim)
global_query_vectors_only_global = global_query_vectors_only_global.contiguous().view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
global_key_vectors = global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
global_value_vectors = global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert list(global_attn_scores.size()) == [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], f'global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}.'
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_scores = global_attn_scores.transpose(1, 2)
global_attn_scores[is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :] = torch.finfo(global_attn_scores.dtype).min
global_attn_scores = global_attn_scores.transpose(1, 2)
global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None, :], torch.finfo(global_attn_scores.dtype).min)
global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_probs_float = nn.functional.softmax(global_attn_scores, dim=-1, dtype=torch.float32)
if layer_head_mask is not None:
assert layer_head_mask.size() == (self.num_heads,), f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}'
global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_probs_float = global_attn_probs_float.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_probs = nn.functional.dropout(global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training)
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], f'global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}.'
global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_output = global_attn_output.view(batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim)
return (global_attn_output, global_attn_probs)
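# --- Illustrative sketch, not part of the original class ---
# _chunk above materialises overlapping windows of size 2w with overlap w via as_strided.
# The same trick on a toy sequence (w = 2, so chunks of length 4 starting every 2 positions):
import torch
w = 2
seq = torch.arange(8.0).view(1, 8, 1)                # (batch * heads, seq_len, head_dim)
hidden = seq.view(seq.size(0), seq.size(1) // (2 * w), 2 * w, seq.size(2))
size = list(hidden.size())
size[1] = size[1] * 2 - 1                            # 3 overlapping chunks instead of 2 disjoint ones
stride = list(hidden.stride())
stride[1] = stride[1] // 2                           # step w between chunk starts instead of 2w
chunks = hidden.as_strided(size=size, stride=stride)
# chunks[0, :, :, 0] == tensor([[0., 1., 2., 3.], [2., 3., 4., 5.], [4., 5., 6., 7.]])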
| null
| 18
| 7
| 51
| 8
| 33
| 11
| 2
| 0.33
| 1
| 8
| 0
| 0
| 7
| 13
| 12
| 22
| 633
| 101
| 407
| 131
| 354
| 135
| 191
| 88
| 178
| 7
| 1
| 1
| 22
|
3,339
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDForConditionalGeneration
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...generation import GenerationMixin
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_led import LEDConfig
from typing import Optional, Union
@auto_docstring(custom_intro='\n The LED Model with a language modeling head. Can be used for summarization.\n ')
class LEDForConditionalGeneration(LEDPreTrainedModel, GenerationMixin):
base_model_prefix = 'led'
_keys_to_ignore_on_load_missing = ['final_logits_bias']
_tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight', 'lm_head.weight']
def __init__(self, config: LEDConfig):
super().__init__(config)
self.led = LEDModel(config)
self.register_buffer('final_logits_bias', torch.zeros((1, self.led.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.led.shared.num_embeddings, bias=False)
self.post_init()
def get_encoder(self):
return self.led.get_encoder()
def get_decoder(self):
return self.led.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer('final_logits_bias', new_bias)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqLMOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Summarization:
```python
>>> import torch
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-large-16384-arxiv")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-large-16384-arxiv")
>>> ARTICLE_TO_SUMMARIZE = '''Transformers (Vaswani et al., 2017) have achieved state-of-the-art
... results in a wide range of natural language tasks including generative language modeling
... (Dai et al., 2019; Radford et al., 2019) and discriminative language understanding (Devlin et al., 2019).
... This success is partly due to the self-attention component which enables the network to capture contextual
... information from the entire sequence. While powerful, the memory and computational requirements of
... self-attention grow quadratically with sequence length, making it infeasible (or very expensive) to
... process long sequences. To address this limitation, we present Longformer, a modified Transformer
... architecture with a self-attention operation that scales linearly with the sequence length, making it
... versatile for processing long documents (Fig 1). This is an advantage for natural language tasks such as
... long document classification, question answering (QA), and coreference resolution, where existing approaches
... partition or shorten the long context into smaller sequences that fall within the typical 512 token limit
... of BERT-style pretrained models. Such partitioning could potentially result in loss of important
... cross-partition information, and to mitigate this problem, existing methods often rely on complex
... architectures to address such interactions. On the other hand, our proposed Longformer is able to build
... contextual representations of the entire context using multiple layers of attention, reducing the need for
... task-specific architectures.'''
>>> inputs = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors="pt")
>>> # Global attention on the first token (cf. Beltagy et al. 2020)
>>> global_attention_mask = torch.zeros_like(inputs)
>>> global_attention_mask[:, 0] = 1
>>> # Generate Summary
>>> summary_ids = model.generate(inputs, global_attention_mask=global_attention_mask, num_beams=3, max_length=32)
>>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
```
Example Conditional generation:
```python
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> prediction = model.generate(input_ids)[0]
>>> print(tokenizer.decode(prediction, skip_special_tokens=True))
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs = self.led(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, global_attention_mask=global_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return LEDSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, encoder_global_attentions=outputs.encoder_global_attentions)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
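As a concrete illustration of the label-shifting behaviour used above (when `labels` are given but no `decoder_input_ids`), here is a minimal sketch of the BART-style `shift_tokens_right` helper; the token values are made up for the example:
```python
import torch


def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    # Re-implementation for illustration: prepend the decoder start token, drop the
    # last position, and replace any -100 label placeholders with the pad token id.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted


labels = torch.tensor([[42, 7, 99, 2]])  # made-up token ids, 2 = </s>
print(shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2))
# tensor([[ 2, 42,  7, 99]])
```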
|
@auto_docstring(custom_intro='\n The LED Model with a language modeling head. Can be used for summarization.\n ')
class LEDForConditionalGeneration(LEDPreTrainedModel, GenerationMixin):
def __init__(self, config: LEDConfig):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
pass
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqLMOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Summarization:
```python
>>> import torch
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-large-16384-arxiv")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-large-16384-arxiv")
>>> ARTICLE_TO_SUMMARIZE = '''Transformers (Vaswani et al., 2017) have achieved state-of-the-art
... results in a wide range of natural language tasks including generative language modeling
... (Dai et al., 2019; Radford et al., 2019) and discriminative language understanding (Devlin et al., 2019).
... This success is partly due to the self-attention component which enables the network to capture contextual
... information from the entire sequence. While powerful, the memory and computational requirements of
... self-attention grow quadratically with sequence length, making it infeasible (or very expensive) to
... process long sequences. To address this limitation, we present Longformer, a modified Transformer
... architecture with a self-attention operation that scales linearly with the sequence length, making it
... versatile for processing long documents (Fig 1). This is an advantage for natural language tasks such as
... long document classification, question answering (QA), and coreference resolution, where existing approaches
... partition or shorten the long context into smaller sequences that fall within the typical 512 token limit
... of BERT-style pretrained models. Such partitioning could potentially result in loss of important
... cross-partition information, and to mitigate this problem, existing methods often rely on complex
... architectures to address such interactions. On the other hand, our proposed Longformer is able to build
... contextual representations of the entire context using multiple layers of attention, reducing the need for
... task-specific architectures.'''
>>> inputs = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors="pt")
>>> # Global attention on the first token (cf. Beltagy et al. 2020)
>>> global_attention_mask = torch.zeros_like(inputs)
>>> global_attention_mask[:, 0] = 1
>>> # Generate Summary
>>> summary_ids = model.generate(inputs, global_attention_mask=global_attention_mask, num_beams=3, max_length=32)
>>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
```
Example Conditional generation:
```python
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> prediction = model.generate(input_ids)[0]
>>> print(tokenizer.decode(prediction, skip_special_tokens=True))
```
'''
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
| 10
| 1
| 14
| 1
| 11
| 2
| 2
| 0.16
| 2
| 8
| 3
| 0
| 9
| 3
| 10
| 12
| 153
| 22
| 113
| 51
| 77
| 18
| 54
| 27
| 43
| 8
| 2
| 2
| 19
|
3,340
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDForQuestionAnswering
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
import torch
from typing import Optional, Union
@auto_docstring
class LEDForQuestionAnswering(LEDPreTrainedModel):
_tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight']
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.led = LEDModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqQuestionAnsweringModelOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.led(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return (total_loss,) + output if total_loss is not None else output
return LEDSeq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, encoder_global_attentions=outputs.encoder_global_attentions)
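A hedged usage sketch for the question-answering head defined above. The checkpoint name is only illustrative (its `qa_outputs` layer is randomly initialized unless you load a fine-tuned model), and the span decoding is the usual argmax over start/end logits rather than anything LED-specific:
```python
import torch
from transformers import AutoTokenizer, LEDForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
model = LEDForQuestionAnswering.from_pretrained("allenai/led-base-16384")

question = "Who wrote Longformer?"
context = "Longformer was introduced by Beltagy, Peters and Cohan in 2020."
inputs = tokenizer(question, context, return_tensors="pt")

# Put global attention on the question tokens (everything up to the first separator),
# as recommended in the `global_attention_mask` docstring.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
first_sep = (inputs["input_ids"][0] == tokenizer.sep_token_id).nonzero()[0].item()
global_attention_mask[0, : first_sep + 1] = 1

outputs = model(**inputs, global_attention_mask=global_attention_mask)
start = outputs.start_logits.argmax(-1).item()
end = outputs.end_logits.argmax(-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1], skip_special_tokens=True))
```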
|
@auto_docstring
class LEDForQuestionAnswering(LEDPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqQuestionAnsweringModelOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
'''
pass
| 5
| 1
| 54
| 5
| 42
| 7
| 5
| 0.14
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 4
| 117
| 12
| 92
| 37
| 64
| 13
| 36
| 17
| 33
| 8
| 2
| 2
| 9
|
3,341
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDForSequenceClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import warnings
from ...utils import ModelOutput, auto_docstring, logging
import torch
from .configuration_led import LEDConfig
from typing import Optional, Union
@auto_docstring(custom_intro='\n LED model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ')
class LEDForSequenceClassification(LEDPreTrainedModel):
_tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight']
def __init__(self, config: LEDConfig, **kwargs):
warnings.warn('The `transformers.LEDForSequenceClassification` class is deprecated and will be removed in version 5 of Transformers. No actual method was provided in the original paper on how to perform sequence classification.', FutureWarning)
super().__init__(config, **kwargs)
self.led = LEDModel(config)
self.classification_head = LEDClassificationHead(config.d_model, config.d_model, config.num_labels, config.classifier_dropout)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqSequenceClassifierOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}')
outputs = self.led(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = outputs[0]
eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError('All examples must have the same number of <eos> tokens.')
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[:, -1, :]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = 'regression'
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return LEDSeq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, encoder_global_attentions=outputs.encoder_global_attentions)
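A minimal usage sketch for the (deprecated) classification head above, with an illustrative checkpoint whose classification head is freshly initialized. Note that the eos-pooling in `forward` requires every example in the batch to contain the same number of `</s>` tokens, which the tokenizer's default behaviour satisfies:
```python
import torch
from transformers import AutoTokenizer, LEDForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
model = LEDForSequenceClassification.from_pretrained("allenai/led-base-16384", num_labels=2)

texts = ["A very long document about astronomy ...", "A short note about cooking ..."]
inputs = tokenizer(texts, padding=True, return_tensors="pt")
labels = torch.tensor([0, 1])  # made-up labels for the two examples

outputs = model(**inputs, labels=labels)
print(outputs.loss)          # cross-entropy loss (single-label classification is inferred)
print(outputs.logits.shape)  # torch.Size([2, 2])
```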
|
@auto_docstring(custom_intro='\n LED model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ')
class LEDForSequenceClassification(LEDPreTrainedModel):
def __init__(self, config: LEDConfig, **kwargs):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqSequenceClassifierOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 59
| 4
| 52
| 4
| 8
| 0.06
| 1
| 11
| 4
| 0
| 2
| 2
| 2
| 4
| 127
| 10
| 111
| 33
| 84
| 7
| 41
| 14
| 38
| 15
| 2
| 3
| 16
|
3,342
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDLearnedPositionalEmbedding
|
import torch
from torch import nn
class LEDLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(positions)
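A small sketch of the bookkeeping above, assuming the class defined here is in scope: positions start at `past_key_values_length`, so during incremental decoding only the newest positions are embedded, and the returned tensor is per position rather than per batch element:
```python
import torch

emb = LEDLearnedPositionalEmbedding(num_embeddings=1024, embedding_dim=16)

# Full forward pass over 5 tokens -> positions 0..4 are looked up.
first = emb(torch.Size([2, 5]))
# One decoding step with 5 cached positions -> only position 5 is looked up.
step = emb(torch.Size([2, 1]), past_key_values_length=5)

print(first.shape, step.shape)  # torch.Size([5, 16]) torch.Size([1, 16])
```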
|
class LEDLearnedPositionalEmbedding(nn.Embedding):
'''
This module learns positional embeddings up to a fixed maximum size.
'''
def __init__(self, num_embeddings: int, embedding_dim: int):
pass
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0):
'''`input_ids_shape` is expected to be [bsz x seqlen].'''
pass
| 3
| 2
| 5
| 0
| 4
| 1
| 1
| 0.44
| 1
| 2
| 0
| 0
| 2
| 0
| 2
| 2
| 15
| 2
| 9
| 5
| 6
| 4
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
3,343
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDModel
|
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_led import LEDConfig
from typing import Optional, Union
@auto_docstring
class LEDModel(LEDPreTrainedModel):
_tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight']
def __init__(self, config: LEDConfig):
super().__init__(config)
padding_idx, vocab_size = (config.pad_token_id, config.vocab_size)
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = LEDEncoder(config, self.shared)
self.decoder = LEDDecoder(config, self.shared)
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqModelOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, self.config.decoder_start_token_id)
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, LEDEncoderBaseModelOutput)):
encoder_outputs = LEDEncoderBaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, global_attentions=encoder_outputs[3] if len(encoder_outputs) > 3 else None)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs
return LEDSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_global_attentions=encoder_outputs.global_attentions)
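A hedged end-to-end sketch of calling the bare `LEDModel` above with a global-attention mask and inspecting the extra `encoder_global_attentions` field of the returned `LEDSeq2SeqModelOutput`; the checkpoint name is illustrative:
```python
import torch
from transformers import AutoTokenizer, LEDModel

tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
model = LEDModel.from_pretrained("allenai/led-base-16384")

inputs = tokenizer("A long report that easily exceeds the usual 512-token limit ...", return_tensors="pt")

# Global attention on the <s> token only; everything else uses sliding-window attention.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

outputs = model(**inputs, global_attention_mask=global_attention_mask, output_attentions=True)
print(outputs.last_hidden_state.shape)         # (1, seq_len, hidden_size) from the decoder
print(len(outputs.encoder_global_attentions))  # one tensor per encoder layer
```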
|
@auto_docstring
class LEDModel(LEDPreTrainedModel):
def __init__(self, config: LEDConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, global_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], LEDSeq2SeqModelOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given to each token in the encoder, local attention or global attention.
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
'''
pass
| 8
| 1
| 18
| 1
| 15
| 1
| 3
| 0.06
| 1
| 8
| 5
| 0
| 6
| 3
| 6
| 8
| 119
| 14
| 99
| 32
| 68
| 6
| 33
| 13
| 26
| 12
| 2
| 1
| 17
|
3,344
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDPreTrainedModel
|
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
from ...modeling_utils import PreTrainedModel
import torch
from .configuration_led import LEDConfig
@auto_docstring
class LEDPreTrainedModel(PreTrainedModel):
config: LEDConfig
base_model_prefix = 'led'
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids}
return dummy_inputs
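To make the initialization scheme in `_init_weights` concrete, here is a toy check with an assumed `init_std` of 0.02, spelling out the same operations on fresh modules:
```python
import torch
from torch import nn

std = 0.02  # stands in for config.init_std
linear = nn.Linear(8, 8)
embedding = nn.Embedding(100, 8, padding_idx=1)

# The same operations _init_weights applies to each module type:
linear.weight.data.normal_(mean=0.0, std=std)
linear.bias.data.zero_()
embedding.weight.data.normal_(mean=0.0, std=std)
embedding.weight.data[embedding.padding_idx].zero_()

print(linear.bias.abs().sum().item())          # 0.0
print(embedding.weight[1].abs().sum().item())  # 0.0 -> padding row zeroed
```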
|
@auto_docstring
class LEDPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
@property
def dummy_inputs(self):
pass
| 5
| 0
| 9
| 0
| 9
| 0
| 3
| 0
| 1
| 0
| 0
| 6
| 2
| 0
| 2
| 2
| 25
| 2
| 23
| 11
| 19
| 0
| 18
| 10
| 15
| 5
| 1
| 2
| 6
|
3,345
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDSeq2SeqLMOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Base class for sequence-to-sequence language models outputs.\n ')
class LEDSeq2SeqLMOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for sequence-to-sequence language models outputs.\n ')
class LEDSeq2SeqLMOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 9
| 11
| 11
| 10
| 44
| 11
| 11
| 10
| 0
| 1
| 0
| 0
|
3,346
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDSeq2SeqModelOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro="\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n ")
class LEDSeq2SeqModelOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n ")
class LEDSeq2SeqModelOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 10
| 10
| 10
| 9
| 45
| 10
| 10
| 9
| 0
| 1
| 0
| 0
|
3,347
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput
|
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of sequence-to-sequence question answering models.\n ')
class LEDSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: Optional[torch.FloatTensor] = None
end_logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of sequence-to-sequence question answering models.\n ')
class LEDSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.83
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 9
| 12
| 12
| 11
| 46
| 12
| 12
| 11
| 0
| 1
| 0
| 0
|
3,348
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/modeling_led.py
|
transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput
|
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of sequence-to-sequence sentence classification models.\n ')
class LEDSeq2SeqSequenceClassifierOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of sequence-to-sequence sentence classification models.\n ')
class LEDSeq2SeqSequenceClassifierOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see `past_key_values` input) to speed up sequential decoding.
encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
            Global attention weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 9
| 11
| 11
| 10
| 44
| 11
| 11
| 10
| 0
| 1
| 0
| 0
|
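The output dataclass in the record above is consumed like any `ModelOutput`. A minimal, hedged sketch of populating and reading it with dummy tensors (no checkpoint is loaded; shapes are purely illustrative):
```python
import torch
from transformers.models.led.modeling_led import LEDSeq2SeqSequenceClassifierOutput

batch_size, num_labels = 2, 3
output = LEDSeq2SeqSequenceClassifierOutput(
    loss=torch.tensor(0.7),                    # dummy scalar loss
    logits=torch.randn(batch_size, num_labels),  # dummy classification scores
)

print(output.logits.shape)               # torch.Size([2, 3])
print(output["logits"].shape)            # ModelOutput also supports key access
print(output.encoder_global_attentions)  # None unless output_attentions=True was requested
```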
3,349
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/tokenization_led.py
|
transformers.models.led.tokenization_led.LEDTokenizer
|
from ...utils import PaddingStrategy, logging
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import os
from typing import Optional, Union
import regex as re
import json
from ...tokenization_utils_base import BatchEncoding, EncodedInput
class LEDTokenizer(PreTrainedTokenizer):
"""
    Constructs a LED tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import LEDTokenizer
>>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
>>> tokenizer("Hello world")["input_ids"]
[0, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The BART tokenizer detects the beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A LED sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())):
text = ' ' + text
return (text, kwargs)
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
encoded_inputs = super()._pad(encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if return_attention_mask and 'global_attention_mask' in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
if needs_to_be_padded:
difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == 'right':
encoded_inputs['global_attention_mask'] = encoded_inputs['global_attention_mask'] + [-1] * difference
elif self.padding_side == 'left':
encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs['global_attention_mask']
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
|
class LEDTokenizer(PreTrainedTokenizer):
'''
    Constructs a LED tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import LEDTokenizer
>>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
>>> tokenizer("Hello world")["input_ids"]
[0, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The BART tokenizer detects the beginning of words by the preceding space).
'''
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def _tokenize(self, text):
'''Tokenize a string.'''
pass
def _convert_token_to_id(self, token):
        '''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
        '''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) to a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A LED sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
pass
| 16
| 8
| 19
| 2
| 14
| 4
| 3
| 0.6
| 1
| 12
| 1
| 0
| 14
| 9
| 14
| 103
| 374
| 57
| 200
| 82
| 156
| 120
| 134
| 49
| 119
| 9
| 3
| 3
| 45
|
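As a quick illustration of the special-token helpers in the tokenizer record above, here is a hedged sketch; it assumes the public `allenai/led-base-16384` vocabulary files (already referenced in the class docstring) can be downloaded:
```python
from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")

ids_a = tok.encode("long document", add_special_tokens=False)
ids_b = tok.encode("its summary", add_special_tokens=False)

# Single sequence: <s> A </s>; pair: <s> A </s></s> B </s>
single = tok.build_inputs_with_special_tokens(ids_a)
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
print(single)
print(pair)

print(tok.get_special_tokens_mask(ids_a, ids_b))               # 1 marks the added special tokens
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # all zeros: LED uses no token type ids
```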
3,350
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/led/tokenization_led_fast.py
|
transformers.models.led.tokenization_led_fast.LEDTokenizerFast
|
from tokenizers import processors
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
import json
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from typing import Optional, Union
from ...tokenization_utils_fast import PreTrainedTokenizerFast
class LEDTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer,
using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import LEDTokenizerFast
>>> tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
>>> tokenizer("Hello world")["input_ids"]
[0, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The LED tokenizer detects the beginning of words by the preceding space).
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = LEDTokenizer
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
mask_token = AddedToken(mask_token, lstrip=True, normalized=True, special=True) if isinstance(mask_token, str) else mask_token
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
tokenizer_component = 'post_processor'
tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__())
if 'sep' in state:
state['sep'] = tuple(state['sep'])
if 'cls' in state:
state['cls'] = tuple(state['cls'])
changes_to_apply = False
if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
state['add_prefix_space'] = add_prefix_space
changes_to_apply = True
if state.get('trim_offsets', trim_offsets) != trim_offsets:
state['trim_offsets'] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors, state.pop('type'))
new_value = component_class(**state)
setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
LED tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
comprise the space before the *<mask>*.
"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
"""
Overriding the default behavior of the mask token to have it eat the space before it.
This is needed to preserve backward compatibility with all the previously used models based on LED.
"""
value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
self._mask_token = value
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
if is_split_into_words and (not self.add_prefix_space):
raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
if is_split_into_words and (not self.add_prefix_space):
raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
encoded_inputs = super()._pad(encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if return_attention_mask and 'global_attention_mask' in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
if needs_to_be_padded:
difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == 'right':
encoded_inputs['global_attention_mask'] = encoded_inputs['global_attention_mask'] + [-1] * difference
elif self.padding_side == 'left':
encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs['global_attention_mask']
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
|
class LEDTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer,
using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import LEDTokenizerFast
>>> tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
>>> tokenizer("Hello world")["input_ids"]
[0, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The LED tokenizer detects the beginning of words by the preceding space).
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
'''
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
pass
@property
def mask_token(self) -> str:
'''
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
LED tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
comprise the space before the *<mask>*.
'''
pass
@mask_token.setter
    def mask_token(self, value):
'''
Overriding the default behavior of the mask token to have it eat the space before it.
This is needed to preserve backward compatibility with all the previously used models based on LED.
'''
pass
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
pass
| 12
| 4
| 20
| 2
| 15
| 3
| 3
| 0.71
| 1
| 8
| 1
| 0
| 9
| 1
| 9
| 97
| 286
| 49
| 139
| 57
| 101
| 98
| 73
| 29
| 63
| 8
| 3
| 3
| 28
|
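The `_pad` override in the record above is the notable part: it extends a user-supplied `global_attention_mask` with `-1` on the padding side. A hedged sketch (again assuming the `allenai/led-base-16384` files are reachable):
```python
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

enc = tok("a short input")
# LED expects the caller to mark global-attention tokens (here only the leading <s>).
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)

# Padding to a fixed length also pads the global mask, using -1 for padding positions.
padded = tok.pad(enc, padding="max_length", max_length=16)
print(padded["input_ids"])
print(padded["global_attention_mask"])  # trailing -1 entries on the (default) right side
```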
3,351
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/configuration_levit.py
|
transformers.models.levit.configuration_levit.LevitConfig
|
from ...configuration_utils import PretrainedConfig
class LevitConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LeViT
[facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size of the input image.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input image.
kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the initial convolution layers of patch embedding.
stride (`int`, *optional*, defaults to 2):
The stride size for the initial convolution layers of patch embedding.
padding (`int`, *optional*, defaults to 1):
The padding size for the initial convolution layers of patch embedding.
patch_size (`int`, *optional*, defaults to 16):
The patch size for embeddings.
hidden_sizes (`list[int]`, *optional*, defaults to `[128, 256, 384]`):
Dimension of each of the encoder blocks.
num_attention_heads (`list[int]`, *optional*, defaults to `[4, 8, 12]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
depths (`list[int]`, *optional*, defaults to `[4, 4, 4]`):
The number of layers in each encoder block.
key_dim (`list[int]`, *optional*, defaults to `[16, 16, 16]`):
The size of key in each of the encoder blocks.
drop_path_rate (`int`, *optional*, defaults to 0):
The dropout probability for stochastic depths, used in the blocks of the Transformer encoder.
        mlp_ratio (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
        attention_ratio (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
Ratio of the size of the output dimension compared to input dimension of attention layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import LevitConfig, LevitModel
>>> # Initializing a LeViT levit-128S style configuration
>>> configuration = LevitConfig()
>>> # Initializing a model (with random weights) from the levit-128S style configuration
>>> model = LevitModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'levit'
def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.num_channels = num_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.hidden_sizes = hidden_sizes
self.num_attention_heads = num_attention_heads
self.depths = depths
self.key_dim = key_dim
self.drop_path_rate = drop_path_rate
self.patch_size = patch_size
self.attention_ratio = attention_ratio
self.mlp_ratio = mlp_ratio
self.initializer_range = initializer_range
self.down_ops = [['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2]]
|
class LevitConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LeViT
[facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size of the input image.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input image.
kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the initial convolution layers of patch embedding.
stride (`int`, *optional*, defaults to 2):
The stride size for the initial convolution layers of patch embedding.
padding (`int`, *optional*, defaults to 1):
The padding size for the initial convolution layers of patch embedding.
patch_size (`int`, *optional*, defaults to 16):
The patch size for embeddings.
hidden_sizes (`list[int]`, *optional*, defaults to `[128, 256, 384]`):
Dimension of each of the encoder blocks.
num_attention_heads (`list[int]`, *optional*, defaults to `[4, 8, 12]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
depths (`list[int]`, *optional*, defaults to `[4, 4, 4]`):
The number of layers in each encoder block.
key_dim (`list[int]`, *optional*, defaults to `[16, 16, 16]`):
The size of key in each of the encoder blocks.
drop_path_rate (`int`, *optional*, defaults to 0):
The dropout probability for stochastic depths, used in the blocks of the Transformer encoder.
        mlp_ratio (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
        attention_ratio (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
Ratio of the size of the output dimension compared to input dimension of attention layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import LevitConfig, LevitModel
>>> # Initializing a LeViT levit-128S style configuration
>>> configuration = LevitConfig()
>>> # Initializing a model (with random weights) from the levit-128S style configuration
>>> model = LevitModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
pass
| 2
| 1
| 37
| 0
| 37
| 0
| 1
| 1.21
| 1
| 1
| 0
| 0
| 1
| 15
| 1
| 1
| 95
| 9
| 39
| 35
| 20
| 47
| 19
| 18
| 17
| 1
| 1
| 0
| 1
|
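To make the configuration record above concrete, a minimal sketch of instantiating it with the documented defaults and inspecting the `down_ops` list that `__init__` derives from `key_dim` and `hidden_sizes` (nothing is downloaded):
```python
from transformers import LevitConfig

config = LevitConfig()  # levit-128S style defaults

# down_ops is built as ["Subsample", key_dim[0], hidden_sizes[i] // key_dim[0], 4, 2, 2]
print(config.hidden_sizes)  # [128, 256, 384]
print(config.down_ops)
# [['Subsample', 16, 8, 4, 2, 2], ['Subsample', 16, 16, 4, 2, 2]]
```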
3,352
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/configuration_levit.py
|
transformers.models.levit.configuration_levit.LevitOnnxConfig
|
from packaging import version
from ...onnx import OnnxConfig
from collections import OrderedDict
from collections.abc import Mapping
class LevitOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
|
class LevitOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 5
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 2
| 0
| 2
| 2
| 14
| 2
| 12
| 6
| 7
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
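A small sketch of reading the ONNX export metadata defined in the record above; it assumes the legacy `transformers.onnx` machinery is still importable in the installed version, and no export is actually run:
```python
from transformers import LevitConfig
from transformers.models.levit.configuration_levit import LevitOnnxConfig

onnx_config = LevitOnnxConfig(LevitConfig())

print(onnx_config.inputs)               # OrderedDict mapping 'pixel_values' to its named axes
print(onnx_config.atol_for_validation)  # 0.0001, the tolerance used when validating an export
```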
3,353
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/feature_extraction_levit.py
|
transformers.models.levit.feature_extraction_levit.LevitFeatureExtractor
|
from .image_processing_levit import LevitImageProcessor
from ...utils.import_utils import requires
import warnings
@requires(backends=('vision',))
class LevitFeatureExtractor(LevitImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use LevitImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class LevitFeatureExtractor(LevitImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 24
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
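The record above is only a deprecation shim. A hedged sketch of what the migration looks like in practice (requires the vision extras, i.e. Pillow, to be installed):
```python
import warnings
from transformers import LevitFeatureExtractor, LevitImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = LevitFeatureExtractor()  # still works, but emits a FutureWarning

print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
print(isinstance(legacy, LevitImageProcessor))                     # True: it is just a subclass

processor = LevitImageProcessor()  # the drop-in replacement for new code
```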
3,354
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/image_processing_levit.py
|
transformers.models.levit.image_processing_levit.LevitImageProcessor
|
from typing import Optional, Union
from ...utils.import_utils import requires
from collections.abc import Iterable
from ...image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
@requires(backends=('vision',))
class LevitImageProcessor(BaseImageProcessor):
"""
Constructs a LeViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the shortest edge of the input to `int(256/224 * size)`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
            value, i.e., if height > width, then the image will be rescaled to `(size["shortest_edge"] * height / width,
size["shortest_edge"])`. Can be overridden by the `size` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
by the `do_center_crop` parameter in the `preprocess` method.
crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`list[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`list[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]]=IMAGENET_DEFAULT_STD, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size, param_name='crop_size')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image.
If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
size["width"])`.
If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
        The smaller edge of the image will be matched to this value, i.e., if height > width, then the image will be rescaled
to `(size["shortest_edge"] * height / width, size["shortest_edge"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
                i.e., if height > width, then the image will be rescaled to (size * height / width, size).
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size_dict = get_size_dict(size, default_to_square=False)
if 'shortest_edge' in size:
shortest_edge = int(256 / 224 * size['shortest_edge'])
output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format)
size_dict = {'height': output_size[0], 'width': output_size[1]}
if 'height' not in size_dict or 'width' not in size_dict:
raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
return resize(image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, return_tensors: Optional[TensorType]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess an image or batch of images to be used as input to a LeViT model.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
                i.e., if height > width, then the image will be rescaled to (size * height / width, size).
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the output image after center cropping. Crops images to (crop_size["height"],
crop_size["width"]).
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image pixel values by `rescale_factor`, typically to values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Factor to rescale the image pixel values by.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image pixel values by `image_mean` and `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to normalize the image pixel values by.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to normalize the image pixel values by.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images]
if do_center_crop:
images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
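# Hedged usage sketch (added for illustration, not part of the original file): it
# mirrors the rescale and normalize steps of `preprocess` above on a dummy NumPy
# image, using the ImageNet mean/std defaults assumed for this processor; the
# resize and center-crop steps are elided.
import numpy as np
image = np.random.randint(0, 256, size=(3, 224, 224), dtype=np.uint8)
pixels = image.astype(np.float32) * (1 / 255)  # do_rescale with rescale_factor=1/255
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
pixels = (pixels - mean) / std  # do_normalize with image_mean / image_std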
|
@requires(backends=('vision',))
class LevitImageProcessor(BaseImageProcessor):
'''
Constructs a LeViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the shortest edge of the input to int(256/224 * `size`). Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
value, i.e., if height > width, then the image will be rescaled to `(size["shortest_edge"] * height / width,
size["shortest_edge"])`. Can be overridden by the `size` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
by the `do_center_crop` parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]]=IMAGENET_DEFAULT_STD, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image.
If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
size["width"])`.
If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
The smaller edge of the image will be matched to this value, i.e., if height > width, then the image will be rescaled
to `(size["shortest_edge"] * height / width, size["shortest_edge"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
i.e., if height > width, then the image will be rescaled to (size * height / width, size).
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, return_tensors: Optional[TensorType]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess an image or batch of images to be used as input to a LeViT model.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
`c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
i.e., if height > width, then the image will be rescaled to (size * height / width, size).
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the output image after center cropping. Crops images to (crop_size["height"],
crop_size["width"]).
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image pixel values by `rescale_factor`, typically to values between 0 and 1.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Factor to rescale the image pixel values by.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image pixel values by `image_mean` and `image_std`.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to normalize the image pixel values by.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to normalize the image pixel values by.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 6
| 3
| 72
| 5
| 42
| 25
| 9
| 0.85
| 1
| 8
| 2
| 1
| 3
| 10
| 3
| 23
| 261
| 20
| 130
| 57
| 88
| 111
| 60
| 19
| 56
| 18
| 3
| 1
| 26
|
3,355
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitAttention
|
import itertools
import torch
from torch import nn
class LevitAttention(nn.Module):
def __init__(self, hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution):
super().__init__()
self.num_attention_heads = num_attention_heads
self.scale = key_dim ** (-0.5)
self.key_dim = key_dim
self.attention_ratio = attention_ratio
self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads * 2
self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
self.queries_keys_values = MLPLayerWithBN(hidden_sizes, self.out_dim_keys_values)
self.activation = nn.Hardswish()
self.projection = MLPLayerWithBN(self.out_dim_projection, hidden_sizes, bn_weight_init=0)
points = list(itertools.product(range(resolution), range(resolution)))
len_points = len(points)
attention_offsets, indices = ({}, [])
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
indices.append(attention_offsets[offset])
self.attention_bias_cache = {}
self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(indices).view(len_points, len_points), persistent=False)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {}
def get_attention_biases(self, device):
if self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, hidden_state):
batch_size, seq_length, _ = hidden_state.shape
queries_keys_values = self.queries_keys_values(hidden_state)
query, key, value = queries_keys_values.view(batch_size, seq_length, self.num_attention_heads, -1).split([self.key_dim, self.key_dim, self.attention_ratio * self.key_dim], dim=3)
query = query.permute(0, 2, 1, 3)
key = key.permute(0, 2, 1, 3)
value = value.permute(0, 2, 1, 3)
attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
attention = attention.softmax(dim=-1)
hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, seq_length, self.out_dim_projection)
hidden_state = self.projection(self.activation(hidden_state))
return hidden_state
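# Hedged sketch (added for illustration): it reproduces the relative-position
# bookkeeping from __init__ for a tiny 2x2 grid of tokens. Every (query, key)
# pair is bucketed by its absolute offset (|dx|, |dy|); `attention_biases` learns
# one bias per head per bucket, and `attention_bias_idxs` gathers them back into
# a full (seq_len, seq_len) bias map at forward time.
import itertools
import torch
resolution = 2
points = list(itertools.product(range(resolution), range(resolution)))
attention_offsets, indices = {}, []
for p1 in points:
    for p2 in points:
        offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
        if offset not in attention_offsets:
            attention_offsets[offset] = len(attention_offsets)
        indices.append(attention_offsets[offset])
bias_idxs = torch.LongTensor(indices).view(len(points), len(points))
print(attention_offsets)  # {(0, 0): 0, (0, 1): 1, (1, 0): 2, (1, 1): 3}
print(bias_idxs.shape)    # torch.Size([4, 4])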
|
class LevitAttention(nn.Module):
def __init__(self, hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution):
pass
@torch.no_grad()
def train(self, mode=True):
pass
def get_attention_biases(self, device):
pass
def forward(self, hidden_state):
pass
| 6
| 0
| 14
| 1
| 13
| 0
| 3
| 0.02
| 1
| 6
| 1
| 0
| 4
| 11
| 4
| 14
| 60
| 7
| 53
| 28
| 47
| 1
| 47
| 27
| 42
| 4
| 1
| 3
| 10
|
3,356
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitAttentionSubsample
|
from torch import nn
import itertools
import torch
class LevitAttentionSubsample(nn.Module):
def __init__(self, input_dim, output_dim, key_dim, num_attention_heads, attention_ratio, stride, resolution_in, resolution_out):
super().__init__()
self.num_attention_heads = num_attention_heads
self.scale = key_dim ** (-0.5)
self.key_dim = key_dim
self.attention_ratio = attention_ratio
self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads
self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
self.resolution_out = resolution_out
self.keys_values = MLPLayerWithBN(input_dim, self.out_dim_keys_values)
self.queries_subsample = LevitSubsample(stride, resolution_in)
self.queries = MLPLayerWithBN(input_dim, key_dim * num_attention_heads)
self.activation = nn.Hardswish()
self.projection = MLPLayerWithBN(self.out_dim_projection, output_dim)
self.attention_bias_cache = {}
points = list(itertools.product(range(resolution_in), range(resolution_in)))
points_ = list(itertools.product(range(resolution_out), range(resolution_out)))
len_points, len_points_ = (len(points), len(points_))
attention_offsets, indices = ({}, [])
for p1 in points_:
for p2 in points:
size = 1
offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), abs(p1[1] * stride - p2[1] + (size - 1) / 2))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
indices.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(indices).view(len_points_, len_points), persistent=False)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {}
def get_attention_biases(self, device):
if self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, hidden_state):
batch_size, seq_length, _ = hidden_state.shape
key, value = self.keys_values(hidden_state).view(batch_size, seq_length, self.num_attention_heads, -1).split([self.key_dim, self.attention_ratio * self.key_dim], dim=3)
key = key.permute(0, 2, 1, 3)
value = value.permute(0, 2, 1, 3)
query = self.queries(self.queries_subsample(hidden_state))
query = query.view(batch_size, self.resolution_out ** 2, self.num_attention_heads, self.key_dim).permute(0, 2, 1, 3)
attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
attention = attention.softmax(dim=-1)
hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, -1, self.out_dim_projection)
hidden_state = self.projection(self.activation(hidden_state))
return hidden_state
|
class LevitAttentionSubsample(nn.Module):
def __init__(self, input_dim, output_dim, key_dim, num_attention_heads, attention_ratio, stride, resolution_in, resolution_out):
pass
@torch.no_grad()
def train(self, mode=True):
pass
def get_attention_biases(self, device):
pass
def forward(self, hidden_state):
pass
| 6
| 0
| 19
| 1
| 18
| 1
| 3
| 0.03
| 1
| 7
| 2
| 0
| 4
| 14
| 4
| 14
| 81
| 8
| 72
| 43
| 56
| 2
| 52
| 32
| 47
| 4
| 1
| 3
| 10
|
3,357
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitClassificationLayer
|
from torch import nn
class LevitClassificationLayer(nn.Module):
"""
LeViT Classification Layer
"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.batch_norm = nn.BatchNorm1d(input_dim)
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, hidden_state):
hidden_state = self.batch_norm(hidden_state)
logits = self.linear(hidden_state)
return logits
|
class LevitClassificationLayer(nn.Module):
'''
LeViT Classification Layer
'''
def __init__(self, input_dim, output_dim):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 4
| 0
| 4
| 0
| 1
| 0.33
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 14
| 2
| 9
| 6
| 6
| 3
| 9
| 6
| 6
| 1
| 1
| 0
| 2
|
3,358
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitConvEmbeddings
|
from torch import nn
class LevitConvEmbeddings(nn.Module):
"""
LeViT Conv Embeddings with Batch Norm, used in the initial patch embedding layer.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False)
self.batch_norm = nn.BatchNorm2d(out_channels)
def forward(self, embeddings):
embeddings = self.convolution(embeddings)
embeddings = self.batch_norm(embeddings)
return embeddings
|
class LevitConvEmbeddings(nn.Module):
'''
LeViT Conv Embeddings with Batch Norm, used in the initial patch embedding layer.
'''
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bn_weight_init=1):
pass
def forward(self, embeddings):
pass
| 3
| 1
| 6
| 0
| 6
| 0
| 1
| 0.23
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 18
| 2
| 13
| 7
| 8
| 3
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
3,359
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitEncoder
|
from torch import nn
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ModelOutput
class LevitEncoder(nn.Module):
"""
LeViT Encoder consisting of multiple `LevitStage` stages.
"""
def __init__(self, config):
super().__init__()
self.config = config
resolution = self.config.image_size // self.config.patch_size
self.stages = []
self.config.down_ops.append([''])
for stage_idx in range(len(config.depths)):
stage = LevitStage(config, stage_idx, config.hidden_sizes[stage_idx], config.key_dim[stage_idx], config.depths[stage_idx], config.num_attention_heads[stage_idx], config.attention_ratio[stage_idx], config.mlp_ratio[stage_idx], config.down_ops[stage_idx], resolution)
resolution = stage.get_resolution()
self.stages.append(stage)
self.stages = nn.ModuleList(self.stages)
def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
for stage in self.stages:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
hidden_state = stage(hidden_state)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple((v for v in [hidden_state, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
|
class LevitEncoder(nn.Module):
'''
LeViT Encoder consisting of multiple `LevitStage` stages.
'''
def __init__(self, config):
pass
def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
pass
| 3
| 1
| 19
| 3
| 17
| 0
| 4
| 0.09
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 12
| 44
| 7
| 34
| 10
| 31
| 3
| 23
| 10
| 20
| 6
| 1
| 2
| 8
|
3,360
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitForImageClassification
|
from ...utils import auto_docstring, logging
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ModelOutput
import torch
@auto_docstring(custom_intro='\n Levit Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class LevitForImageClassification(LevitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.num_labels = config.num_labels
self.levit = LevitModel(config)
self.classifier = LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else torch.nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = sequence_output.mean(1)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
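# Hedged usage sketch (added for illustration): it assumes the public transformers
# API and the "facebook/levit-128S" checkpoint; any other LeViT checkpoint should
# work the same way.
from PIL import Image
import requests
from transformers import AutoImageProcessor, LevitForImageClassification
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassification.from_pretrained("facebook/levit-128S")
inputs = processor(images=image, return_tensors="pt")
predicted_class = model(**inputs).logits.argmax(-1).item()
print(model.config.id2label[predicted_class])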
|
@auto_docstring(custom_intro='\n Levit Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class LevitForImageClassification(LevitPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 34
| 4
| 26
| 4
| 7
| 0.13
| 1
| 6
| 3
| 0
| 2
| 4
| 2
| 3
| 76
| 8
| 60
| 20
| 44
| 8
| 34
| 13
| 31
| 12
| 2
| 3
| 14
|
3,361
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitForImageClassificationWithTeacher
|
from typing import Optional, Union
import torch
from ...utils import auto_docstring, logging
@auto_docstring(custom_intro='\n LeViT Model transformer with image classification heads on top (a linear layer on top of the final hidden state and\n a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet. .. warning::\n This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n supported.\n ')
class LevitForImageClassificationWithTeacher(LevitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.num_labels = config.num_labels
self.levit = LevitModel(config)
self.classifier = LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else torch.nn.Identity()
self.classifier_distill = LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else torch.nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LevitForImageClassificationWithTeacherOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = sequence_output.mean(1)
cls_logits, distill_logits = (self.classifier(sequence_output), self.classifier_distill(sequence_output))
logits = (cls_logits + distill_logits) / 2
if not return_dict:
output = (logits, cls_logits, distill_logits) + outputs[2:]
return output
return LevitForImageClassificationWithTeacherOutput(logits=logits, cls_logits=cls_logits, distillation_logits=distill_logits, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n LeViT Model transformer with image classification heads on top (a linear layer on top of the final hidden state and\n a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet. .. warning::\n This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n supported.\n ')
class LevitForImageClassificationWithTeacher(LevitPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LevitForImageClassificationWithTeacherOutput]:
pass
| 5
| 0
| 23
| 3
| 19
| 1
| 3
| 0.04
| 1
| 5
| 3
| 0
| 2
| 5
| 2
| 3
| 54
| 7
| 45
| 19
| 30
| 2
| 20
| 13
| 17
| 3
| 2
| 1
| 6
|
3,362
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitMLPLayer
|
from torch import nn
class LevitMLPLayer(nn.Module):
"""
MLP Layer with `2X` expansion in contrast to ViT with `4X`.
"""
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.linear_up = MLPLayerWithBN(input_dim, hidden_dim)
self.activation = nn.Hardswish()
self.linear_down = MLPLayerWithBN(hidden_dim, input_dim)
def forward(self, hidden_state):
hidden_state = self.linear_up(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.linear_down(hidden_state)
return hidden_state
|
class LevitMLPLayer(nn.Module):
'''
MLP Layer with `2X` expansion in contrast to ViT with `4X`.
'''
def __init__(self, input_dim, hidden_dim):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 1
| 0.27
| 1
| 2
| 1
| 0
| 2
| 3
| 2
| 12
| 16
| 2
| 11
| 6
| 8
| 3
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
3,363
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitModel
|
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ModelOutput
from ...utils import auto_docstring, logging
import torch
@auto_docstring
class LevitModel(LevitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.patch_embeddings = LevitPatchEmbeddings(config)
self.encoder = LevitEncoder(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embeddings = self.patch_embeddings(pixel_values)
encoder_outputs = self.encoder(embeddings, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state.mean(dim=1)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class LevitModel(LevitPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
pass
| 5
| 0
| 21
| 3
| 17
| 1
| 3
| 0.05
| 1
| 6
| 3
| 0
| 2
| 3
| 2
| 3
| 51
| 7
| 42
| 16
| 26
| 2
| 19
| 10
| 16
| 5
| 2
| 1
| 6
|
3,364
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitPatchEmbeddings
|
from torch import nn
class LevitPatchEmbeddings(nn.Module):
"""
LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
`LevitConvEmbeddings`.
"""
def __init__(self, config):
super().__init__()
self.embedding_layer_1 = LevitConvEmbeddings(config.num_channels, config.hidden_sizes[0] // 8, config.kernel_size, config.stride, config.padding)
self.activation_layer_1 = nn.Hardswish()
self.embedding_layer_2 = LevitConvEmbeddings(config.hidden_sizes[0] // 8, config.hidden_sizes[0] // 4, config.kernel_size, config.stride, config.padding)
self.activation_layer_2 = nn.Hardswish()
self.embedding_layer_3 = LevitConvEmbeddings(config.hidden_sizes[0] // 4, config.hidden_sizes[0] // 2, config.kernel_size, config.stride, config.padding)
self.activation_layer_3 = nn.Hardswish()
self.embedding_layer_4 = LevitConvEmbeddings(config.hidden_sizes[0] // 2, config.hidden_sizes[0], config.kernel_size, config.stride, config.padding)
self.num_channels = config.num_channels
def forward(self, pixel_values):
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
embeddings = self.embedding_layer_1(pixel_values)
embeddings = self.activation_layer_1(embeddings)
embeddings = self.embedding_layer_2(embeddings)
embeddings = self.activation_layer_2(embeddings)
embeddings = self.embedding_layer_3(embeddings)
embeddings = self.activation_layer_3(embeddings)
embeddings = self.embedding_layer_4(embeddings)
return embeddings.flatten(2).transpose(1, 2)
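# Hedged shape check (added for illustration): assuming the usual 224x224 input
# with four stride-2 conv embeddings, each layer halves the spatial resolution,
# so the patch grid is 224 // 2**4 = 14 per side and the flattened sequence
# passed to the transformer has 14 * 14 = 196 tokens.
image_size, stride, num_conv_layers = 224, 2, 4
resolution = image_size // stride**num_conv_layers
print(resolution, resolution * resolution)  # 14 196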
|
class LevitPatchEmbeddings(nn.Module):
'''
LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
`LevitConvEmbeddings`.
'''
def __init__(self, config):
pass
def forward(self, pixel_values):
pass
| 3
| 1
| 18
| 2
| 16
| 0
| 2
| 0.12
| 1
| 3
| 1
| 0
| 2
| 8
| 2
| 12
| 42
| 5
| 33
| 13
| 30
| 4
| 23
| 13
| 20
| 2
| 1
| 1
| 3
|
3,365
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitPreTrainedModel
|
from ...utils import auto_docstring, logging
from ...modeling_utils import PreTrainedModel
from .configuration_levit import LevitConfig
from torch import nn
@auto_docstring
class LevitPreTrainedModel(PreTrainedModel):
config: LevitConfig
base_model_prefix = 'levit'
main_input_name = 'pixel_values'
_no_split_modules = ['LevitResidualLayer']
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class LevitPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 11
| 0
| 8
| 3
| 4
| 0.54
| 1
| 0
| 0
| 3
| 1
| 0
| 1
| 1
| 22
| 2
| 13
| 6
| 11
| 7
| 12
| 6
| 10
| 4
| 1
| 2
| 4
|
3,366
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitResidualLayer
|
import torch
from torch import nn
class LevitResidualLayer(nn.Module):
"""
Residual Block for LeViT
"""
def __init__(self, module, drop_rate):
super().__init__()
self.module = module
self.drop_rate = drop_rate
def forward(self, hidden_state):
if self.training and self.drop_rate > 0:
rnd = torch.rand(hidden_state.size(0), 1, 1, device=hidden_state.device)
rnd = rnd.ge_(self.drop_rate).div(1 - self.drop_rate).detach()
hidden_state = hidden_state + self.module(hidden_state) * rnd
return hidden_state
else:
hidden_state = hidden_state + self.module(hidden_state)
return hidden_state
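# Hedged sketch (added for illustration): the training branch above is per-sample
# stochastic depth. Dropping the residual branch with probability `drop_rate` and
# rescaling the survivors by 1 / (1 - drop_rate) keeps the expected output equal
# to the deterministic eval-time path, as this quick check suggests.
import torch
torch.manual_seed(0)
drop_rate, num_samples = 0.2, 100_000
keep_mask = (torch.rand(num_samples) >= drop_rate).float() / (1 - drop_rate)
print(keep_mask.mean().item())  # ~1.0, so E[x + f(x) * mask] ~= x + f(x)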
|
class LevitResidualLayer(nn.Module):
'''
Residual Block for LeViT
'''
def __init__(self, module, drop_rate):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 7
| 0
| 7
| 0
| 2
| 0.21
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 19
| 2
| 14
| 6
| 11
| 3
| 13
| 6
| 10
| 2
| 1
| 1
| 3
|
3,367
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitStage
|
from torch import nn
class LevitStage(nn.Module):
"""
LeViT Stage consisting of `LevitMLPLayer` and `LevitAttention` layers.
"""
def __init__(self, config, idx, hidden_sizes, key_dim, depths, num_attention_heads, attention_ratio, mlp_ratio, down_ops, resolution_in):
super().__init__()
self.layers = []
self.config = config
self.resolution_in = resolution_in
for _ in range(depths):
self.layers.append(LevitResidualLayer(LevitAttention(hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution_in), self.config.drop_path_rate))
if mlp_ratio > 0:
hidden_dim = hidden_sizes * mlp_ratio
self.layers.append(LevitResidualLayer(LevitMLPLayer(hidden_sizes, hidden_dim), self.config.drop_path_rate))
if down_ops[0] == 'Subsample':
self.resolution_out = (self.resolution_in - 1) // down_ops[5] + 1
self.layers.append(LevitAttentionSubsample(*self.config.hidden_sizes[idx:idx + 2], key_dim=down_ops[1], num_attention_heads=down_ops[2], attention_ratio=down_ops[3], stride=down_ops[5], resolution_in=resolution_in, resolution_out=self.resolution_out))
self.resolution_in = self.resolution_out
if down_ops[4] > 0:
hidden_dim = self.config.hidden_sizes[idx + 1] * down_ops[4]
self.layers.append(LevitResidualLayer(LevitMLPLayer(self.config.hidden_sizes[idx + 1], hidden_dim), self.config.drop_path_rate))
self.layers = nn.ModuleList(self.layers)
def get_resolution(self):
return self.resolution_in
def forward(self, hidden_state):
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
|
class LevitStage(nn.Module):
'''
LeViT Stage consisting of `LevitMLPLayer` and `LevitAttention` layers.
'''
def __init__(self, config, idx, hidden_sizes, key_dim, depths, num_attention_heads, attention_ratio, mlp_ratio, down_ops, resolution_in):
pass
def get_resolution(self):
pass
def forward(self, hidden_state):
pass
| 4
| 1
| 20
| 1
| 19
| 0
| 3
| 0.07
| 1
| 6
| 4
| 0
| 3
| 4
| 3
| 13
| 67
| 5
| 58
| 23
| 42
| 4
| 25
| 11
| 21
| 5
| 1
| 2
| 8
|
3,368
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.LevitSubsample
|
from torch import nn
class LevitSubsample(nn.Module):
def __init__(self, stride, resolution):
super().__init__()
self.stride = stride
self.resolution = resolution
def forward(self, hidden_state):
batch_size, _, channels = hidden_state.shape
hidden_state = hidden_state.view(batch_size, self.resolution, self.resolution, channels)[:, ::self.stride, ::self.stride].reshape(batch_size, -1, channels)
return hidden_state
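# Hedged shape check (added for illustration): striding the 2D token grid keeps
# every `stride`-th row and column, e.g. a 14x14 grid (196 tokens) subsampled
# with stride 2 keeps 7 positions per side, i.e. 49 tokens.
import torch
batch_size, resolution, channels, stride = 1, 14, 16, 2
hidden_state = torch.randn(batch_size, resolution * resolution, channels)
subsampled = hidden_state.view(batch_size, resolution, resolution, channels)[:, ::stride, ::stride]
print(subsampled.reshape(batch_size, -1, channels).shape)  # torch.Size([1, 49, 16])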
|
class LevitSubsample(nn.Module):
def __init__(self, stride, resolution):
pass
def forward(self, hidden_state):
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 9
| 6
| 6
| 1
| 1
| 0
| 2
|
3,369
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/levit/modeling_levit.py
|
transformers.models.levit.modeling_levit.MLPLayerWithBN
|
from torch import nn
class MLPLayerWithBN(nn.Module):
def __init__(self, input_dim, output_dim, bn_weight_init=1):
super().__init__()
self.linear = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False)
self.batch_norm = nn.BatchNorm1d(output_dim)
def forward(self, hidden_state):
hidden_state = self.linear(hidden_state)
hidden_state = self.batch_norm(hidden_state.flatten(0, 1)).reshape_as(hidden_state)
return hidden_state
|
class MLPLayerWithBN(nn.Module):
def __init__(self, input_dim, output_dim, bn_weight_init=1):
pass
def forward(self, hidden_state):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
3,370
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/configuration_lilt.py
|
transformers.models.lilt.configuration_lilt.LiltConfig
|
from ...configuration_utils import PretrainedConfig
class LiltConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`LiltModel`]. It is used to instantiate a LiLT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LiLT
[SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the LiLT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LiltModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer. Should be a multiple of 24.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LiltModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
channel_shrink_ratio (`int`, *optional*, defaults to 4):
The shrink ratio compared to the `hidden_size` for the channel dimension of the layout embeddings.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something
large just in case (e.g., 1024).
Examples:
```python
>>> from transformers import LiltConfig, LiltModel
>>> # Initializing a LiLT SCUT-DLVCLab/lilt-roberta-en-base style configuration
>>> configuration = LiltConfig()
>>> # Randomly initializing a model from the SCUT-DLVCLab/lilt-roberta-en-base style configuration
>>> model = LiltModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'lilt'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.classifier_dropout = classifier_dropout
self.channel_shrink_ratio = channel_shrink_ratio
self.max_2d_position_embeddings = max_2d_position_embeddings
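# Hedged arithmetic check (added for illustration): with the defaults above, the
# layout (bounding-box) stream of LiLT runs at hidden_size // channel_shrink_ratio
# channels, i.e. 768 // 4 = 192, while the text stream keeps the full 768.
hidden_size, channel_shrink_ratio = 768, 4
print(hidden_size // channel_shrink_ratio)  # 192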
|
class LiltConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`LiltModel`]. It is used to instantiate a LiLT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LiLT
[SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the LiLT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LiltModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer. Should be a multiple of 24.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LiltModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
channel_shrink_ratio (`int`, *optional*, defaults to 4):
The shrink ratio compared to the `hidden_size` for the channel dimension of the layout embeddings.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something
large just in case (e.g., 1024).
Examples:
```python
>>> from transformers import LiltConfig, LiltModel
>>> # Initializing a LiLT SCUT-DLVCLab/lilt-roberta-en-base style configuration
>>> configuration = LiltConfig()
>>> # Randomly initializing a model from the SCUT-DLVCLab/lilt-roberta-en-base style configuration
>>> model = LiltModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
pass
| 2
| 1
| 39
| 1
| 38
| 0
| 1
| 1.45
| 1
| 1
| 0
| 0
| 1
| 16
| 1
| 1
| 105
| 7
| 40
| 39
| 18
| 58
| 20
| 19
| 18
| 1
| 1
| 0
| 1
|
3,371
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py
|
transformers.models.lilt.modeling_lilt.LiltAttention
|
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from typing import Optional, Union
from torch import nn
class LiltAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
self.self = LiltSelfAttention(config, position_embedding_type=position_embedding_type, layer_idx=layer_idx)
self.output = LiltSelfOutput(config)
self.pruned_heads = set()
ori_hidden_size = config.hidden_size
config.hidden_size = config.hidden_size // config.channel_shrink_ratio
self.layout_output = LiltSelfOutput(config)
config.hidden_size = ori_hidden_size
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, layout_inputs: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, layout_inputs, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0][0], hidden_states)
layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs)
outputs = ((attention_output, layout_attention_output),) + self_outputs[1:]
return outputs
|
class LiltAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, layout_inputs: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 14
| 1
| 1
| 0.1
| 1
| 6
| 2
| 0
| 3
| 4
| 3
| 13
| 50
| 5
| 42
| 21
| 31
| 4
| 27
| 14
| 23
| 2
| 1
| 1
| 4
|
3,372
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py
|
transformers.models.lilt.modeling_lilt.LiltClassificationHead
|
from torch import nn
import torch
class LiltClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
|
class LiltClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
| 3
| 1
| 8
| 0
| 8
| 1
| 2
| 0.12
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 20
| 2
| 17
| 8
| 14
| 2
| 15
| 8
| 12
| 2
| 1
| 0
| 3
|
3,373
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py
|
transformers.models.lilt.modeling_lilt.LiltEncoder
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from torch import nn
class LiltEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LiltLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states: torch.Tensor, layout_inputs: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, layout_inputs, attention_mask, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0][0]
layout_inputs = layer_outputs[0][1]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
|
class LiltEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, layout_inputs: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutput]:
pass
| 3
| 0
| 33
| 4
| 30
| 0
| 6
| 0.02
| 1
| 8
| 2
| 0
| 2
| 3
| 2
| 12
| 69
| 8
| 60
| 20
| 48
| 1
| 25
| 11
| 22
| 10
| 1
| 2
| 11
|
3,374
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py
|
transformers.models.lilt.modeling_lilt.LiltForQuestionAnswering
|
import torch
from torch import nn
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class LiltForQuestionAnswering(LiltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.lilt = LiltModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of the input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForQuestionAnswering
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModelForQuestionAnswering.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
>>> predicted_answer = tokenizer.decode(predict_answer_tokens)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.lilt(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
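            # Clamp out-of-range positions to ignored_index so CrossEntropyLoss skips them.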
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class LiltForQuestionAnswering(LiltPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForQuestionAnswering
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModelForQuestionAnswering.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
>>> predicted_answer = tokenizer.decode(predict_answer_tokens)
```'''
pass
| 5 | 1 | 55 | 9 | 31 | 15 | 4 | 0.48 | 1 | 5 | 2 | 0 | 2 | 3 | 2 | 3 | 115 | 19 | 65 | 31 | 46 | 31 | 32 | 16 | 29 | 7 | 2 | 2 | 8 |
3,375 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltForSequenceClassification |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring(custom_intro='\n LiLT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class LiltForSequenceClassification(LiltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.lilt = LiltModel(config, add_pooling_layer=False)
self.classifier = LiltClassificationHead(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModelForSequenceClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> predicted_class_idx = outputs.logits.argmax(-1).item()
>>> predicted_class = model.config.id2label[predicted_class_idx]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.lilt(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
labels = labels.to(logits.device)
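            # Infer the problem type (regression vs. single-/multi-label classification) from num_labels and the label dtype when not set.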
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n LiLT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class LiltForSequenceClassification(LiltPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModelForSequenceClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> predicted_class_idx = outputs.logits.argmax(-1).item()
>>> predicted_class = model.config.id2label[predicted_class_idx]
```'''
pass
| 5 | 1 | 52 | 7 | 34 | 12 | 7 | 0.34 | 1 | 7 | 3 | 0 | 2 | 4 | 2 | 3 | 109 | 15 | 70 | 27 | 52 | 24 | 34 | 13 | 31 | 12 | 2 | 3 | 13 |
3,376 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltForTokenClassification |
from typing import Optional, Union
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring
class LiltForTokenClassification(LiltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.lilt = LiltModel(config, add_pooling_layer=False)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForTokenClassification
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModelForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> predicted_class_indices = outputs.logits.argmax(-1)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.lilt(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class LiltForTokenClassification(LiltPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, bbox: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForTokenClassification
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModelForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> predicted_class_indices = outputs.logits.argmax(-1)
```'''
pass
| 5 | 1 | 45 | 8 | 27 | 10 | 4 | 0.37 | 1 | 5 | 2 | 0 | 2 | 4 | 2 | 3 | 94 | 16 | 57 | 28 | 39 | 21 | 24 | 14 | 21 | 5 | 2 | 1 | 7 |
3,377 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltIntermediate |
import torch
from ...activations import ACT2FN
from torch import nn
class LiltIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class LiltIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
3,378 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltLayer |
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
import torch
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class LiltLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LiltAttention(config, layer_idx=layer_idx)
self.intermediate = LiltIntermediate(config)
self.output = LiltOutput(config)
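        # Temporarily shrink the config by channel_shrink_ratio to build the smaller layout-stream feed-forward, then restore the original sizes.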
ori_hidden_size = config.hidden_size
ori_intermediate_size = config.intermediate_size
config.hidden_size = config.hidden_size // config.channel_shrink_ratio
config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio
self.layout_intermediate = LiltIntermediate(config)
self.layout_output = LiltOutput(config)
config.hidden_size = ori_hidden_size
config.intermediate_size = ori_intermediate_size
def forward(self, hidden_states: torch.Tensor, layout_inputs: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, layout_inputs, attention_mask, head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0][0]
layout_attention_output = self_attention_outputs[0][1]
outputs = self_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
layout_layer_output = apply_chunking_to_forward(self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output)
outputs = ((layer_output, layout_layer_output),) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def layout_feed_forward_chunk(self, attention_output):
intermediate_output = self.layout_intermediate(attention_output)
layer_output = self.layout_output(intermediate_output, attention_output)
return layer_output
|
class LiltLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, layout_inputs: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
def layout_feed_forward_chunk(self, attention_output):
pass
| 5 | 0 | 13 | 1 | 12 | 0 | 1 | 0.04 | 1 | 6 | 3 | 0 | 4 | 7 | 4 | 14 | 58 | 7 | 50 | 31 | 38 | 2 | 33 | 24 | 28 | 1 | 1 | 0 | 4 |
3,379 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltLayoutEmbeddings |
import torch
from torch import nn
class LiltLayoutEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
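        # Six box features (x0, y0, x1, y1, height, width) are each embedded into hidden_size // 6 dimensions and concatenated in forward().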
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
self.padding_idx = config.pad_token_id
self.box_position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size // config.channel_shrink_ratio, padding_idx=self.padding_idx)
self.box_linear_embeddings = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size // config.channel_shrink_ratio)
self.LayerNorm = nn.LayerNorm(config.hidden_size // config.channel_shrink_ratio, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, bbox=None, position_ids=None):
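        # bbox holds one (x0, y0, x1, y1) box per token; out-of-range coordinates make the embedding lookups raise IndexError.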
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError('The `bbox` coordinate values should be within 0-1000 range.') from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat([left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings], dim=-1)
spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings)
box_position_embeddings = self.box_position_embeddings(position_ids)
spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings
spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings)
spatial_position_embeddings = self.dropout(spatial_position_embeddings)
return spatial_position_embeddings
|
class LiltLayoutEmbeddings(nn.Module):
def __init__(self, config):
pass
def forward(self, bbox=None, position_ids=None):
pass
| 3 | 0 | 26 | 3 | 22 | 1 | 2 | 0.04 | 1 | 2 | 0 | 0 | 2 | 9 | 2 | 12 | 54 | 7 | 45 | 21 | 42 | 2 | 29 | 20 | 26 | 2 | 1 | 1 | 3 |
3,380 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltModel |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from ...utils import auto_docstring, logging
import torch
@auto_docstring
class LiltModel(LiltPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = LiltTextEmbeddings(config)
self.layout_embeddings = LiltLayoutEmbeddings(config)
self.encoder = LiltEncoder(config)
self.pooler = LiltPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModel
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if bbox is None:
bbox = torch.zeros(input_shape + (4,), dtype=torch.long, device=device)
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
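        # Turn the 2D padding mask into a broadcastable additive mask (large negative values at padded positions).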
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output, position_ids = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
layout_embedding_output = self.layout_embeddings(bbox=bbox, position_ids=position_ids)
encoder_outputs = self.encoder(embedding_output, layout_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class LiltModel(LiltPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
'''
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModel
>>> from datasets import load_dataset
>>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> model = AutoModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
| 8 | 3 | 27 | 4 | 17 | 6 | 4 | 0.33 | 1 | 9 | 5 | 0 | 5 | 5 | 5 | 6 | 140 | 26 | 86 | 36 | 66 | 28 | 48 | 23 | 42 | 14 | 2 | 2 | 20 |
3,381 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltOutput |
import torch
from torch import nn
class LiltOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class LiltOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,382 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltPooler |
import torch
from torch import nn
class LiltPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class LiltPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2 |
3,383 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltPreTrainedModel |
from torch import nn
from ...utils import auto_docstring, logging
from ...modeling_utils import PreTrainedModel
from .configuration_lilt import LiltConfig
@auto_docstring
class LiltPreTrainedModel(PreTrainedModel):
config: LiltConfig
base_model_prefix = 'lilt'
supports_gradient_checkpointing = True
_no_split_modules = []
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class LiltPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 15 | 0 | 12 | 3 | 6 | 0.47 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 1 | 27 | 2 | 17 | 6 | 15 | 8 | 15 | 6 | 13 | 6 | 1 | 2 | 6 |
3,384 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltSelfAttention |
from torch import nn
import torch
import math
class LiltSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.layout_query = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio)
self.layout_key = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio)
self.layout_value = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.channel_shrink_ratio = config.channel_shrink_ratio
self.layer_idx = layer_idx
def transpose_for_scores(self, x, r=1):
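        # Split the last dimension into (num_heads, head_size // r) and move heads to dim 1; r > 1 is used by the shrunken layout stream.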
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size // r)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, output_attentions=False):
layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio)
layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio)
layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio)
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
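        # Share attention scores across modalities: each stream adds the other's scaled scores before softmax.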
tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size)
tmp_layout_attention_scores = layout_attention_scores / math.sqrt(self.attention_head_size // self.channel_shrink_ratio)
attention_scores = tmp_attention_scores + tmp_layout_attention_scores
layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores
if attention_mask is not None:
layout_attention_scores = layout_attention_scores + attention_mask
layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores)
layout_attention_probs = self.dropout(layout_attention_probs)
if head_mask is not None:
layout_attention_probs = layout_attention_probs * head_mask
layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer)
layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size // self.channel_shrink_ratio,)
layout_context_layer = layout_context_layer.view(*new_context_layer_shape)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = ((context_layer, layout_context_layer), attention_probs) if output_attentions else ((context_layer, layout_context_layer),)
return outputs
|
class LiltSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
def transpose_for_scores(self, x, r=1):
pass
def forward(self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, output_attentions=False):
pass
| 4 | 0 | 44 | 8 | 32 | 4 | 4 | 0.11 | 1 | 3 | 0 | 0 | 3 | 14 | 3 | 13 | 135 | 27 | 98 | 51 | 87 | 11 | 73 | 44 | 69 | 9 | 1 | 2 | 13 |
3,385 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltSelfOutput |
import torch
from torch import nn
class LiltSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class LiltSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,386 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/lilt/modeling_lilt.py | transformers.models.lilt.modeling_lilt.LiltTextEmbeddings |
import torch
from torch import nn
class LiltTextEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return (embeddings, position_ids)
def create_position_ids_from_input_ids(self, input_ids, padding_idx):
"""
Args:
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
x: torch.Tensor x:
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
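        # Cumulative sum over the non-padding mask gives 1..n for real tokens; padding stays at 0 and padding_idx is added back below.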
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
Args:
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
|
class LiltTextEmbeddings(nn.Module):
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass
def create_position_ids_from_input_ids(self, input_ids, padding_idx):
        '''
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.
        Args:
            input_ids: torch.Tensor
            padding_idx: int
        Returns: torch.Tensor
        '''
pass
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        '''
        We are provided embeddings directly. We cannot infer which positions are padded, so we just generate sequential position ids.
        Args:
            inputs_embeds: torch.Tensor
        Returns: torch.Tensor
        '''
pass
| 5 | 2 | 21 | 2 | 14 | 5 | 3 | 0.33 | 1 | 1 | 0 | 0 | 4 | 7 | 4 | 14 | 87 | 11 | 57 | 27 | 46 | 19 | 41 | 21 | 36 | 7 | 1 | 2 | 10 |
3,387 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/configuration_llama.py | transformers.models.llama.configuration_llama.LlamaConfig |
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig
class LlamaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
e.g. [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import LlamaModel, LlamaConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = LlamaConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = LlamaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'llama'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
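        # Backwards compatibility: a legacy `type` key is copied to `rope_type` before the RoPE configuration is validated.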
if self.rope_scaling is not None and 'type' in self.rope_scaling:
self.rope_scaling['rope_type'] = self.rope_scaling['type']
rope_config_validation(self)
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class LlamaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
e.g. [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work on longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to the low-frequency components of the RoPE.
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to the high-frequency components of the RoPE.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If `None`, it will default to `hidden_size // num_attention_heads`.
```python
>>> from transformers import LlamaModel, LlamaConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = LlamaConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = LlamaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, **kwargs):
pass
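As a quick illustration of the GQA and RoPE-scaling arguments described in the docstring above, a minimal sketch of building a small config (all sizes and scaling values here are illustrative, not taken from any released checkpoint):

```python
from transformers import LlamaConfig

# Illustrative values only: a small grouped-query config with linear RoPE scaling.
config = LlamaConfig(
    hidden_size=2048,
    intermediate_size=5632,
    num_hidden_layers=16,
    num_attention_heads=16,
    num_key_value_heads=4,  # 4 KV heads shared by 16 query heads -> GQA
    max_position_embeddings=4096,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)
print(config.head_dim)  # 2048 // 16 = 128, the default when head_dim is not given
```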
| 2
| 1
| 62
| 3
| 56
| 3
| 4
| 1.63
| 1
| 1
| 0
| 1
| 1
| 18
| 1
| 1
| 191
| 12
| 68
| 48
| 41
| 111
| 29
| 23
| 27
| 4
| 1
| 1
| 4
|
3,388
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/convert_llama_weights_to_hf.py
|
transformers.models.llama.convert_llama_weights_to_hf.Llama3Converter
|
from transformers.convert_slow_tokenizer import TikTokenConverter
from tokenizers import AddedToken, processors
from transformers import GenerationConfig, LlamaConfig, LlamaForCausalLM, LlamaTokenizer, PreTrainedTokenizerFast
class Llama3Converter(TikTokenConverter):
def __init__(self, vocab_file, special_tokens=None, instruct=False, llama_version='3.2', **kwargs):
super().__init__(vocab_file, additional_special_tokens=special_tokens, **kwargs)
tokenizer = self.converted()
templates_for_version = {'2': ('meta-llama/Llama-2-7b-chat-hf', 'f5db02db724555f92da89c216ac04704f23d4590'), '3': ('meta-llama/Meta-Llama-3-8B-Instruct', '5f0b02c75b57c5855da9ae460ce51323ea669d8a'), '3.1': ('meta-llama/Llama-3.1-8B-Instruct', '0e9e39f249a16976918f6564b8830bc894c89659'), '3.2': ('meta-llama/Llama-3.2-1B-Instruct', 'e9f8effbab1cbdc515c11ee6e098e3d5a9f51e14'), 'Guard-3': ('meta-llama/Llama-Guard-3-1B', 'acf7aafa60f0410f8f42b1fa35e077d705892029')}
additional_kwargs = {}
if instruct or llama_version in ['Guard-3']:
model_id, revision = templates_for_version.get(llama_version, (None, None))
if model_id is not None:
from transformers import AutoTokenizer
t = AutoTokenizer.from_pretrained(model_id, revision=revision)
additional_kwargs['chat_template'] = t.chat_template
self.converted_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, bos_token='<|begin_of_text|>', eos_token='<|end_of_text|>' if not instruct else '<|eot_id|>', model_input_names=['input_ids', 'attention_mask'], model_max_length=CONTEXT_LENGTH_FOR_VERSION[llama_version], clean_up_tokenization_spaces=True, **additional_kwargs)
self.update_post_processor(self.converted_tokenizer)
self.converted_tokenizer._bos_token = BOS_ADDED_TOKEN
self.converted_tokenizer._eos_token = EOT_ADDED_TOKEN if instruct else EOS_ADDED_TOKEN
def update_post_processor(self, tokenizer):
tokenizer._tokenizer.post_processor = processors.Sequence([processors.ByteLevel(trim_offsets=False), processors.TemplateProcessing(single='<|begin_of_text|> $A', pair='<|begin_of_text|>:0 $A:0 <|begin_of_text|>:1 $B:1', special_tokens=[('<|begin_of_text|>', tokenizer.convert_tokens_to_ids('<|begin_of_text|>'))])])
|
class Llama3Converter(TikTokenConverter):
def __init__(self, vocab_file, special_tokens=None, instruct=False, llama_version='3.2', **kwargs):
pass
def update_post_processor(self, tokenizer):
pass
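A hedged usage sketch of the converter above, assuming `tiktoken` is installed and a Meta-format `tokenizer.model` file is available locally; the path and special-token list below are placeholders, not values from the source:

```python
# Placeholder path and token list; assumes tiktoken is installed and a
# Meta-format tokenizer.model file is available locally.
from transformers.models.llama.convert_llama_weights_to_hf import Llama3Converter

converter = Llama3Converter(
    vocab_file="path/to/tokenizer.model",             # placeholder
    special_tokens=["<|begin_of_text|>", "<|end_of_text|>", "<|eot_id|>"],
    instruct=False,                                   # skip the chat-template download
    llama_version="3.2",
)
fast_tokenizer = converter.converted_tokenizer        # a PreTrainedTokenizerFast
fast_tokenizer.save_pretrained("llama3-tokenizer")
```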
| 3
| 0
| 26
| 2
| 21
| 3
| 3
| 0.14
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 6
| 54
| 5
| 43
| 10
| 39
| 6
| 18
| 10
| 14
| 5
| 1
| 2
| 6
|
3,389
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaAttention
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...cache_utils import Cache, DynamicCache
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
import torch
from ...utils.deprecation import deprecate_kwarg
from .configuration_llama import LlamaConfig
from torch import nn
class LlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: LlamaConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class LlamaAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: LlamaConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
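The module above delegates to helpers not included in this record (`apply_rotary_pos_emb`, `eager_attention_forward`, and the KV-head repetition implied by `num_key_value_groups`). A self-contained sketch of the grouped-query eager attention path, with toy shapes chosen only for illustration:

```python
import torch
import torch.nn.functional as F

def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)
    b, kv, s, d = x.shape
    if n_rep == 1:
        return x
    return x[:, :, None, :, :].expand(b, kv, n_rep, s, d).reshape(b, kv * n_rep, s, d)

def eager_gqa_attention(q, k, v, mask, scaling, n_rep):
    # Expand the KV heads so every query head has a matching key/value head.
    k, v = repeat_kv(k, n_rep), repeat_kv(v, n_rep)
    scores = torch.matmul(q, k.transpose(2, 3)) * scaling
    if mask is not None:
        scores = scores + mask[:, :, :, : k.shape[-2]]
    probs = F.softmax(scores, dim=-1, dtype=torch.float32).to(q.dtype)
    return torch.matmul(probs, v)

# Toy shapes: 8 query heads sharing 2 KV heads over 4 tokens with head_dim 16.
q = torch.randn(1, 8, 4, 16)
k = torch.randn(1, 2, 4, 16)
v = torch.randn(1, 2, 4, 16)
print(eager_gqa_attention(q, k, v, None, 16 ** -0.5, n_rep=4).shape)  # (1, 8, 4, 16)
```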
| 4
| 1
| 35
| 4
| 31
| 1
| 3
| 0.03
| 1
| 6
| 3
| 7
| 2
| 11
| 2
| 12
| 74
| 9
| 63
| 31
| 52
| 2
| 34
| 23
| 31
| 5
| 1
| 2
| 6
|
3,390
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaDecoderLayer
|
from .configuration_llama import LlamaConfig
from ...processing_utils import Unpack
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
import torch
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache
from ...utils.deprecation import deprecate_kwarg
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
class LlamaDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: LlamaConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = LlamaAttention(config=config, layer_idx=layer_idx)
self.mlp = LlamaMLP(config)
self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class LlamaDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: LlamaConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
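The layer above applies the pre-norm residual pattern twice: normalize, transform, then add back the residual. A minimal generic sketch of that pattern, using `LayerNorm` as a stand-in for `LlamaRMSNorm`:

```python
import torch
from torch import nn

class PreNormBlock(nn.Module):
    # Generic pre-norm residual wrapper: y = x + f(norm(x)); the decoder layer
    # above applies this once around attention and once around the MLP.
    def __init__(self, dim, f):
        super().__init__()
        self.norm = nn.LayerNorm(dim)  # stand-in for LlamaRMSNorm
        self.f = f

    def forward(self, x):
        return x + self.f(self.norm(x))

block = PreNormBlock(32, nn.Linear(32, 32))
print(block(torch.randn(2, 5, 32)).shape)  # torch.Size([2, 5, 32])
```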
| 4
| 0
| 25
| 4
| 21
| 2
| 2
| 0.07
| 1
| 10
| 6
| 9
| 2
| 5
| 2
| 12
| 52
| 8
| 42
| 22
| 28
| 3
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
3,391
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaForCausalLM
|
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from typing import Callable, Optional, Union
import torch
@auto_docstring
class LlamaForCausalLM(LlamaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = LlamaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class LlamaForCausalLM(LlamaPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
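Besides the generation example in the docstring, the forward pass above also computes a language-modeling loss when `labels` are passed. A small sketch using a tiny randomly initialized config instead of the pretrained checkpoint (sizes are illustrative):

```python
import torch
from transformers import LlamaConfig, LlamaForCausalLM

# Tiny illustrative config; sizes are arbitrary and not from any checkpoint.
config = LlamaConfig(
    vocab_size=128, hidden_size=64, intermediate_size=128,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
)
model = LlamaForCausalLM(config)

input_ids = torch.randint(0, 128, (1, 10))
outputs = model(input_ids=input_ids, labels=input_ids)  # labels trigger the LM loss
print(outputs.loss, outputs.logits.shape)  # scalar loss, torch.Size([1, 10, 128])
```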
| 6
| 1
| 14
| 2
| 9
| 4
| 2
| 0.38
| 2
| 9
| 4
| 10
| 8
| 3
| 8
| 9
| 123
| 21
| 74
| 36
| 47
| 28
| 36
| 20
| 27
| 8
| 2
| 1
| 15
|
3,392
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaForQuestionAnswering
|
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class LlamaForQuestionAnswering(GenericForQuestionAnswering, LlamaPreTrainedModel):
base_model_prefix = 'transformer'
|
class LlamaForQuestionAnswering(GenericForQuestionAnswering, LlamaPreTrainedModel):
pass
| 1
| 0
| 18
| 2
| 13
| 3
| 2
| 0.22
| 1
| 5
| 3
| 3
| 4
| 2
| 4
| 5
| 78
| 11
| 55
| 28
| 36
| 12
| 26
| 14
| 21
| 5
| 2
| 1
| 8
|
3,393
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaForSequenceClassification
|
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class LlamaForSequenceClassification(GenericForSequenceClassification, LlamaPreTrainedModel):
...
|
class LlamaForSequenceClassification(GenericForSequenceClassification, LlamaPreTrainedModel):
pass
| 1
| 0
| 21
| 2
| 17
| 2
| 3
| 0.11
| 1
| 7
| 3
| 6
| 4
| 3
| 4
| 5
| 90
| 11
| 71
| 31
| 53
| 8
| 36
| 18
| 31
| 9
| 2
| 1
| 12
|
3,394
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaForTokenClassification
|
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class LlamaForTokenClassification(GenericForTokenClassification, LlamaPreTrainedModel):
...
|
class LlamaForTokenClassification(GenericForTokenClassification, LlamaPreTrainedModel):
pass
| 1
| 0
| 17
| 1
| 14
| 2
| 3
| 0.11
| 1
| 5
| 2
| 6
| 4
| 4
| 4
| 5
| 79
| 8
| 64
| 28
| 41
| 7
| 29
| 15
| 24
| 5
| 2
| 1
| 10
|
3,395
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaMLP
|
from torch import nn
from ...activations import ACT2FN
class LlamaMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
class LlamaMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
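The forward pass above is the gated (SwiGLU-style) MLP: `down_proj(silu(gate_proj(x)) * up_proj(x))`. A functional sketch with toy dimensions:

```python
import torch
import torch.nn.functional as F

# The gated MLP above, written out functionally: down(silu(gate(x)) * up(x)).
def swiglu_mlp(x, w_gate, w_up, w_down):
    return (F.silu(x @ w_gate.T) * (x @ w_up.T)) @ w_down.T

hidden, inter = 16, 40
x = torch.randn(2, 3, hidden)
w_gate, w_up = torch.randn(inter, hidden), torch.randn(inter, hidden)
w_down = torch.randn(hidden, inter)
print(swiglu_mlp(x, w_gate, w_up, w_down).shape)  # torch.Size([2, 3, 16])
```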
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 7
| 2
| 7
| 2
| 12
| 14
| 1
| 13
| 11
| 10
| 0
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
3,396
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaModel
|
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from torch import nn
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...utils.generic import check_model_inputs
import torch
from ...masking_utils import create_causal_mask
from ...cache_utils import Cache, DynamicCache
from .configuration_llama import LlamaConfig
@auto_docstring
class LlamaModel(LlamaPreTrainedModel):
def __init__(self, config: LlamaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = LlamaRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class LlamaModel(LlamaPreTrainedModel):
def __init__(self, config: LlamaConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
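When `use_cache=True` and no cache is passed, the forward above creates a `DynamicCache` and advances `cache_position` from the tokens already seen. A sketch of two-step incremental decoding with a tiny randomly initialized model (sizes are illustrative):

```python
import torch
from transformers import LlamaConfig, LlamaModel

# Tiny illustrative config; the point is the growing cache, not the weights.
config = LlamaConfig(
    vocab_size=128, hidden_size=64, intermediate_size=128,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
)
model = LlamaModel(config).eval()

with torch.no_grad():
    out = model(input_ids=torch.randint(0, 128, (1, 5)), use_cache=True)   # cache created internally
    out = model(input_ids=torch.randint(0, 128, (1, 1)),
                past_key_values=out.past_key_values, use_cache=True)       # cache reused
print(out.past_key_values.get_seq_length())  # 6 tokens seen in total
```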
| 6
| 0
| 40
| 5
| 30
| 6
| 6
| 0.22
| 1
| 16
| 10
| 13
| 5
| 7
| 6
| 7
| 257
| 34
| 184
| 65
| 146
| 40
| 89
| 34
| 82
| 21
| 2
| 2
| 37
|
3,397
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_llama import LlamaConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring
class LlamaPreTrainedModel(PreTrainedModel):
config: LlamaConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['LlamaDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': LlamaDecoderLayer, 'attentions': LlamaAttention}
|
@auto_docstring
class LlamaPreTrainedModel(PreTrainedModel):
pass
| 2
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 0
| 0
| 9
| 1
| 0
| 1
| 1
| 24
| 1
| 23
| 15
| 21
| 0
| 22
| 15
| 20
| 5
| 1
| 2
| 5
|
3,398
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaRMSNorm
|
from torch import nn
import torch
from ...integrations import use_kernel_forward_from_hub
@use_kernel_forward_from_hub('RMSNorm')
class LlamaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
|
@use_kernel_forward_from_hub('RMSNorm')
class LlamaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
LlamaRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
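A quick numeric check of the normalization implemented above, `y = weight * x / sqrt(mean(x^2) + eps)`, with the weight left at its initial value of ones:

```python
import torch
from transformers.models.llama.modeling_llama import LlamaRMSNorm

x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
eps = 1e-6
manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + eps)  # weight is ones initially

norm = LlamaRMSNorm(hidden_size=4, eps=eps)
print(torch.allclose(norm(x), manual, atol=1e-6))  # True
```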
| 5
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 2
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
3,399
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/modeling_llama.py
|
transformers.models.llama.modeling_llama.LlamaRotaryEmbedding
|
from .configuration_llama import LlamaConfig
from torch import nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
class LlamaRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: LlamaConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
|
class LlamaRotaryEmbedding(nn.Module):
def __init__(self, config: LlamaConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
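A short sketch of producing the `cos`/`sin` tables above and applying them to toy query/key tensors via `apply_rotary_pos_emb` (shapes are chosen only for illustration):

```python
import torch
from transformers import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding, apply_rotary_pos_emb

config = LlamaConfig(hidden_size=64, num_attention_heads=4)  # head_dim = 16
rope = LlamaRotaryEmbedding(config=config)

position_ids = torch.arange(6).unsqueeze(0)       # (1, 6)
x = torch.randn(1, 6, 64)                         # only dtype/device are read from x
cos, sin = rope(x, position_ids)                  # each (1, 6, 16)

q = torch.randn(1, 4, 6, 16)                      # (batch, heads, seq, head_dim)
k = torch.randn(1, 4, 6, 16)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
print(q_rot.shape, cos.shape)                     # (1, 4, 6, 16) and (1, 6, 16)
```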
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 4
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|