Dataset schema (column, dtype, min and max shown by the viewer; for string columns the min/max are string lengths):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (lengths) | 7 | 58 |
| file_path | string (lengths) | 9 | 302 |
| class_name | string (lengths) | 5 | 256 |
| human_written_code | string (lengths) | 16 | 2.16M |
| class_skeleton | string (lengths, nullable ⌀) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
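The rows below follow this schema. As a rough sketch of how rows with these columns could be loaded and filtered (the dataset identifier is a placeholder, not a published name):

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub under a
# placeholder id. Shows filtering rows by the metric columns defined above.
from datasets import load_dataset

ds = load_dataset("org/class-level-code-dataset", split="train")  # hypothetical id

# Classes from the transformers repo that have docstrings and modest complexity.
subset = ds.filter(
    lambda row: row["repository_name"] == "huggingface/pytorch-pretrained-BERT"
    and row["total_doc_str"] > 0
    and row["MaxCyclomatic"] <= 10
)
print(subset[0]["class_name"])
```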
id: 3,600
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/tokenization_marian.py
class_name: transformers.models.marian.tokenization_marian.MarianTokenizer
human_written_code:
import os
import warnings
from ...utils.import_utils import requires
from ...tokenization_utils import PreTrainedTokenizer
from shutil import copyfile
from pathlib import Path
from typing import Any, Optional, Union
@requires(backends=('sentencepiece',))
class MarianTokenizer(PreTrainedTokenizer):
"""
Construct a Marian tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
source_spm (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary for the source language.
target_spm (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary for the target language.
source_lang (`str`, *optional*):
A string representing the source language.
target_lang (`str`, *optional*):
A string representing the target language.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
model_max_length (`int`, *optional*, defaults to 512):
The maximum sentence length the model accepts.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import MarianForCausalLM, MarianTokenizer
>>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> src_texts = ["I am a small frog.", "Tom asked his teacher for advice."]
>>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional
>>> inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors="pt", padding=True)
>>> outputs = model(**inputs) # should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, source_spm, target_spm, vocab, target_vocab_file=None, source_lang=None, target_lang=None, unk_token='<unk>', eos_token='</s>', pad_token='<pad>', model_max_length=512, sp_model_kwargs: Optional[dict[str, Any]]=None, separate_vocabs=False, **kwargs) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
assert Path(source_spm).exists(), f'cannot find spm source {source_spm}'
self.separate_vocabs = separate_vocabs
self.encoder = load_json(vocab)
if str(unk_token) not in self.encoder:
raise KeyError('<unk> token must be in the vocab')
assert str(pad_token) in self.encoder
if separate_vocabs:
self.target_encoder = load_json(target_vocab_file)
self.decoder = {v: k for k, v in self.target_encoder.items()}
self.supported_language_codes = []
else:
self.decoder = {v: k for k, v in self.encoder.items()}
self.supported_language_codes: list = [k for k in self.encoder if k.startswith('>>') and k.endswith('<<')]
self.source_lang = source_lang
self.target_lang = target_lang
self.spm_files = [source_spm, target_spm]
self.spm_source = load_spm(source_spm, self.sp_model_kwargs)
self.spm_target = load_spm(target_spm, self.sp_model_kwargs)
self.current_spm = self.spm_source
self.current_encoder = self.encoder
self._setup_normalizer()
super().__init__(source_lang=source_lang, target_lang=target_lang, unk_token=unk_token, eos_token=eos_token, pad_token=pad_token, model_max_length=model_max_length, sp_model_kwargs=self.sp_model_kwargs, target_vocab_file=target_vocab_file, separate_vocabs=separate_vocabs, **kwargs)
def _setup_normalizer(self):
try:
from sacremoses import MosesPunctNormalizer
self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize
except (ImportError, FileNotFoundError):
warnings.warn('Recommended: pip install sacremoses.')
self.punc_normalizer = lambda x: x
def normalize(self, x: str) -> str:
"""Cover moses empty string edge case. They return empty list for '' input!"""
return self.punc_normalizer(x) if x else ''
def _convert_token_to_id(self, token):
return self.current_encoder.get(token, self.current_encoder[self.unk_token])
def remove_language_code(self, text: str):
"""Remove language codes like >>fr<< before sentencepiece"""
code = []
if text.startswith('>>') and (end_loc := text.find('<<')) != -1:
code.append(text[:end_loc + 2])
text = text[end_loc + 2:]
return (code, text)
def _tokenize(self, text: str) -> list[str]:
code, text = self.remove_language_code(text)
pieces = self.current_spm.encode(text, out_type=str)
return code + pieces
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the decoder."""
return self.decoder.get(index, self.unk_token)
def batch_decode(self, sequences, **kwargs):
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
use_source_tokenizer (`bool`, *optional*, defaults to `False`):
Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence
problems).
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`list[str]`: The list of decoded sentences.
"""
return super().batch_decode(sequences, **kwargs)
def decode(self, token_ids, **kwargs):
"""
Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
use_source_tokenizer (`bool`, *optional*, defaults to `False`):
Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence
problems).
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
return super().decode(token_ids, **kwargs)
def convert_tokens_to_string(self, tokens: list[str]) -> str:
"""Uses source spm if _decode_use_source_tokenizer is True, and target spm otherwise"""
sp_model = self.spm_source if self._decode_use_source_tokenizer else self.spm_target
current_sub_tokens = []
out_string = ''
for token in tokens:
if token in self.all_special_tokens:
out_string += sp_model.decode_pieces(current_sub_tokens) + token + ' '
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += sp_model.decode_pieces(current_sub_tokens)
out_string = out_string.replace(SPIECE_UNDERLINE, ' ')
return out_string.strip()
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
"""Build model inputs from a sequence by appending eos_token_id."""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def _switch_to_input_mode(self):
self.current_spm = self.spm_source
self.current_encoder = self.encoder
def _switch_to_target_mode(self):
self.current_spm = self.spm_target
if self.separate_vocabs:
self.current_encoder = self.target_encoder
@property
def vocab_size(self) -> int:
return len(self.encoder)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
saved_files = []
if self.separate_vocabs:
out_src_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab'])
out_tgt_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['target_vocab_file'])
save_json(self.encoder, out_src_vocab_file)
save_json(self.target_encoder, out_tgt_vocab_file)
saved_files.append(out_src_vocab_file)
saved_files.append(out_tgt_vocab_file)
else:
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab'])
save_json(self.encoder, out_vocab_file)
saved_files.append(out_vocab_file)
for spm_save_filename, spm_orig_path, spm_model in zip([VOCAB_FILES_NAMES['source_spm'], VOCAB_FILES_NAMES['target_spm']], self.spm_files, [self.spm_source, self.spm_target]):
spm_save_path = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + spm_save_filename)
if os.path.abspath(spm_orig_path) != os.path.abspath(spm_save_path) and os.path.isfile(spm_orig_path):
copyfile(spm_orig_path, spm_save_path)
saved_files.append(spm_save_path)
elif not os.path.isfile(spm_orig_path):
with open(spm_save_path, 'wb') as fi:
content_spiece_model = spm_model.serialized_model_proto()
fi.write(content_spiece_model)
saved_files.append(spm_save_path)
return tuple(saved_files)
def get_vocab(self) -> dict:
return self.get_src_vocab()
def get_src_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def get_tgt_vocab(self):
return dict(self.target_encoder, **self.added_tokens_decoder)
def __getstate__(self) -> dict:
state = self.__dict__.copy()
state.update(dict.fromkeys(['spm_source', 'spm_target', 'current_spm', 'punc_normalizer', 'target_vocab_file']))
return state
def __setstate__(self, d: dict) -> None:
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.spm_source, self.spm_target = (load_spm(f, self.sp_model_kwargs) for f in self.spm_files)
self.current_spm = self.spm_source
self._setup_normalizer()
def num_special_tokens_to_add(self, *args, **kwargs):
"""Just EOS"""
return 1
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids)
all_special_ids.remove(self.unk_token_id)
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
class_skeleton:
@requires(backends=('sentencepiece',))
class MarianTokenizer(PreTrainedTokenizer):
'''
Construct a Marian tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
source_spm (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary for the source language.
target_spm (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary for the target language.
source_lang (`str`, *optional*):
A string representing the source language.
target_lang (`str`, *optional*):
A string representing the target language.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
model_max_length (`int`, *optional*, defaults to 512):
The maximum sentence length the model accepts.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import MarianForCausalLM, MarianTokenizer
>>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> src_texts = ["I am a small frog.", "Tom asked his teacher for advice."]
>>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional
>>> inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors="pt", padding=True)
>>> outputs = model(**inputs) # should work
```'''
def __init__(self, source_spm, target_spm, vocab, target_vocab_file=None, source_lang=None, target_lang=None, unk_token='<unk>', eos_token='</s>', pad_token='<pad>', model_max_length=512, sp_model_kwargs: Optional[dict[str, Any]]=None, separate_vocabs=False, **kwargs) -> None:
pass
def _setup_normalizer(self):
pass
def normalize(self, x: str) -> str:
'''Cover moses empty string edge case. They return empty list for '' input!'''
pass
def _convert_token_to_id(self, token):
pass
def remove_language_code(self, text: str):
'''Remove language codes like >>fr<< before sentencepiece'''
pass
def _tokenize(self, text: str) -> list[str]:
pass
def _convert_id_to_token(self, index: int) -> str:
'''Converts an index (integer) in a token (str) using the decoder.'''
pass
def batch_decode(self, sequences, **kwargs):
'''
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
use_source_tokenizer (`bool`, *optional*, defaults to `False`):
Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence
problems).
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`list[str]`: The list of decoded sentences.
'''
pass
def decode(self, token_ids, **kwargs):
'''
Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
use_source_tokenizer (`bool`, *optional*, defaults to `False`):
Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence
problems).
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
'''
pass
def convert_tokens_to_string(self, tokens: list[str]) -> str:
'''Uses source spm if _decode_use_source_tokenizer is True, and target spm otherwise'''
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
'''Build model inputs from a sequence by appending eos_token_id.'''
pass
def _switch_to_input_mode(self):
pass
def _switch_to_target_mode(self):
pass
@property
def vocab_size(self) -> int:
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def get_vocab(self) -> dict:
pass
def get_src_vocab(self):
pass
def get_tgt_vocab(self):
pass
def __getstate__(self) -> dict:
pass
def __setstate__(self, d: dict) -> None:
pass
def num_special_tokens_to_add(self, *args, **kwargs):
'''Just EOS'''
pass
def _special_token_mask(self, seq):
pass
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
'''Get list where entries are [1] if a token is [eos] or [pad] else 0.'''
pass
total_program_units: 26 | total_doc_str: 10
AvgCountLine: 11 | AvgCountLineBlank: 1 | AvgCountLineCode: 8 | AvgCountLineComment: 2 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.56
CountClassBase: 1 | CountClassCoupled: 14 | CountClassCoupledModified: 0 | CountClassDerived: 0
CountDeclInstanceMethod: 23 | CountDeclInstanceVariable: 15 | CountDeclMethod: 23 | CountDeclMethodAll: 112
CountLine: 332 | CountLineBlank: 52 | CountLineCode: 181 | CountLineCodeDecl: 79 | CountLineCodeExe: 138 | CountLineComment: 102
CountStmt: 130 | CountStmtDecl: 60 | CountStmtExe: 105
MaxCyclomatic: 10 | MaxInheritanceTree: 3 | MaxNesting: 3 | SumCyclomatic: 47
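The `remove_language_code` helper in the row above strips a leading target-language tag such as `>>fr<<` before SentencePiece runs and keeps it as its own token. A standalone copy of that logic, runnable without the tokenizer class:

```python
# Standalone sketch of MarianTokenizer.remove_language_code's behaviour:
# a leading ">>xx<<" language code is split off before SentencePiece tokenization.
def remove_language_code(text: str):
    code = []
    if text.startswith(">>") and (end_loc := text.find("<<")) != -1:
        code.append(text[: end_loc + 2])
        text = text[end_loc + 2 :]
    return code, text

print(remove_language_code(">>fr<< this is a sentence"))
# (['>>fr<<'], ' this is a sentence')
print(remove_language_code("no code here"))
# ([], 'no code here')
```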
id: 3,601
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/configuration_markuplm.py
class_name: transformers.models.markuplm.configuration_markuplm.MarkupLMConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
class MarkupLMConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a
MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the MarkupLM
[microsoft/markuplm-base](https://huggingface.co/microsoft/markuplm-base) architecture.
Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the
documentation from [`BertConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the MarkupLM model. Defines the different tokens that can be represented by the
*inputs_ids* passed to the forward method of [`MarkupLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the tree id unit embedding might ever use. Typically set this to something large
just in case (e.g., 1024).
max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256):
The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large
just in case (e.g., 256).
max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something
large just in case (e.g., 1024).
tag_pad_id (`int`, *optional*, defaults to 216):
The id of the padding token in the xpath tags.
subs_pad_id (`int`, *optional*, defaults to 1001):
The id of the padding token in the xpath subscripts.
xpath_tag_unit_hidden_size (`int`, *optional*, defaults to 32):
The hidden size of each tree id unit. One complete tree index will have
(50*xpath_tag_unit_hidden_size)-dim.
max_depth (`int`, *optional*, defaults to 50):
The maximum depth in xpath.
Examples:
```python
>>> from transformers import MarkupLMModel, MarkupLMConfig
>>> # Initializing a MarkupLM microsoft/markuplm-base style configuration
>>> configuration = MarkupLMConfig()
>>> # Initializing a model from the microsoft/markuplm-base style configuration
>>> model = MarkupLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'markuplm'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, use_cache=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
class_skeleton:
class MarkupLMConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a
MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the MarkupLM
[microsoft/markuplm-base](https://huggingface.co/microsoft/markuplm-base) architecture.
Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the
documentation from [`BertConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the MarkupLM model. Defines the different tokens that can be represented by the
*inputs_ids* passed to the forward method of [`MarkupLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the tree id unit embedding might ever use. Typically set this to something large
just in case (e.g., 1024).
max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256):
The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large
just in case (e.g., 256).
max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something
large just in case (e.g., 1024).
tag_pad_id (`int`, *optional*, defaults to 216):
The id of the padding token in the xpath tags.
subs_pad_id (`int`, *optional*, defaults to 1001):
The id of the padding token in the xpath subscripts.
xpath_tag_unit_hidden_size (`int`, *optional*, defaults to 32):
The hidden size of each tree id unit. One complete tree index will have
(50*xpath_tag_unit_hidden_size)-dim.
max_depth (`int`, *optional*, defaults to 50):
The maximum depth in xpath.
Examples:
```python
>>> from transformers import MarkupLMModel, MarkupLMConfig
>>> # Initializing a MarkupLM microsoft/markuplm-base style configuration
>>> configuration = MarkupLMConfig()
>>> # Initializing a model from the microsoft/markuplm-base style configuration
>>> model = MarkupLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, use_cache=True, classifier_dropout=None, **kwargs):
pass
total_program_units: 2 | total_doc_str: 1
AvgCountLine: 56 | AvgCountLineBlank: 0 | AvgCountLineCode: 55 | AvgCountLineComment: 1 | AvgCyclomatic: 1 | CommentToCodeRatio: 1.12
CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0
CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 21 | CountDeclMethod: 1 | CountDeclMethodAll: 1
CountLine: 130 | CountLineBlank: 9 | CountLineCode: 57 | CountLineCodeDecl: 51 | CountLineCodeExe: 28 | CountLineComment: 64
CountStmt: 25 | CountStmtDecl: 24 | CountStmtExe: 23
MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 1
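The config docstring above notes that one complete tree index is `(max_depth * xpath_unit_hidden_size)`-dimensional. A small check of that arithmetic with the documented defaults (the override values in the second example are arbitrary, chosen only for illustration):

```python
from transformers import MarkupLMConfig

# Documented defaults: max_depth=50, xpath_unit_hidden_size=32.
config = MarkupLMConfig()
print(config.max_depth * config.xpath_unit_hidden_size)  # 1600-dim full xpath encoding

# Arbitrary overrides; remaining arguments keep their defaults.
small = MarkupLMConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
print(small.hidden_size, small.num_hidden_layers)  # 384 6
```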
id: 3,602
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/feature_extraction_markuplm.py
class_name: transformers.models.markuplm.feature_extraction_markuplm.MarkupLMFeatureExtractor
human_written_code:
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
import html
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
"""
Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from HTML
strings.
This feature extractor inherits from [`~feature_extraction_utils.PreTrainedFeatureExtractor`] which contains most
of the main methods. Users should refer to this superclass for more information regarding those methods.
"""
def __init__(self, **kwargs):
requires_backends(self, ['bs4'])
super().__init__(**kwargs)
def xpath_soup(self, element):
xpath_tags = []
xpath_subscripts = []
child = element if element.name else element.parent
for parent in child.parents:
siblings = parent.find_all(child.name, recursive=False)
xpath_tags.append(child.name)
xpath_subscripts.append(0 if 1 == len(siblings) else next((i for i, s in enumerate(siblings, 1) if s is child)))
child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return (xpath_tags, xpath_subscripts)
def get_three_from_single(self, html_string):
html_code = BeautifulSoup(html_string, 'html.parser')
all_doc_strings = []
string2xtag_seq = []
string2xsubs_seq = []
for element in html_code.descendants:
if isinstance(element, bs4.element.NavigableString):
if type(element.parent) is not bs4.element.Tag:
continue
text_in_this_tag = html.unescape(element).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(text_in_this_tag)
xpath_tags, xpath_subscripts = self.xpath_soup(element)
string2xtag_seq.append(xpath_tags)
string2xsubs_seq.append(xpath_subscripts)
if len(all_doc_strings) != len(string2xtag_seq):
raise ValueError('Number of doc strings and xtags does not correspond')
if len(all_doc_strings) != len(string2xsubs_seq):
raise ValueError('Number of doc strings and xsubs does not correspond')
return (all_doc_strings, string2xtag_seq, string2xsubs_seq)
def construct_xpath(self, xpath_tags, xpath_subscripts):
xpath = ''
for tagname, subs in zip(xpath_tags, xpath_subscripts):
xpath += f'/{tagname}'
if subs != 0:
xpath += f'[{subs}]'
return xpath
def __call__(self, html_strings) -> BatchFeature:
"""
Main method to prepare for the model one or several HTML strings.
Args:
html_strings (`str`, `list[str]`):
The HTML string or batch of HTML strings from which to extract nodes and corresponding xpaths.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **nodes** -- Nodes.
- **xpaths** -- Corresponding xpaths.
Examples:
```python
>>> from transformers import MarkupLMFeatureExtractor
>>> page_name_1 = "page1.html"
>>> page_name_2 = "page2.html"
>>> page_name_3 = "page3.html"
>>> with open(page_name_1) as f:
... single_html_string = f.read()
>>> feature_extractor = MarkupLMFeatureExtractor()
>>> # single example
>>> encoding = feature_extractor(single_html_string)
>>> print(encoding.keys())
>>> # dict_keys(['nodes', 'xpaths'])
>>> # batched example
>>> multi_html_strings = []
>>> with open(page_name_2) as f:
... multi_html_strings.append(f.read())
>>> with open(page_name_3) as f:
... multi_html_strings.append(f.read())
>>> encoding = feature_extractor(multi_html_strings)
>>> print(encoding.keys())
>>> # dict_keys(['nodes', 'xpaths'])
```"""
valid_strings = False
if isinstance(html_strings, str):
valid_strings = True
elif isinstance(html_strings, (list, tuple)):
if len(html_strings) == 0 or isinstance(html_strings[0], str):
valid_strings = True
if not valid_strings:
raise ValueError(f'HTML strings must of type `str`, `list[str]` (batch of examples), but is of type {type(html_strings)}.')
is_batched = isinstance(html_strings, (list, tuple)) and isinstance(html_strings[0], str)
if not is_batched:
html_strings = [html_strings]
nodes = []
xpaths = []
for html_string in html_strings:
all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
nodes.append(all_doc_strings)
xpath_strings = []
for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
xpath_string = self.construct_xpath(tag_list, sub_list)
xpath_strings.append(xpath_string)
xpaths.append(xpath_strings)
data = {'nodes': nodes, 'xpaths': xpaths}
encoded_inputs = BatchFeature(data=data, tensor_type=None)
return encoded_inputs
class_skeleton:
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
'''
Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from HTML
strings.
This feature extractor inherits from [`~feature_extraction_utils.PreTrainedFeatureExtractor`] which contains most
of the main methods. Users should refer to this superclass for more information regarding those methods.
'''
def __init__(self, **kwargs):
pass
def xpath_soup(self, element):
pass
def get_three_from_single(self, html_string):
pass
def construct_xpath(self, xpath_tags, xpath_subscripts):
pass
def __call__(self, html_strings) -> BatchFeature:
'''
Main method to prepare for the model one or several HTML strings.
Args:
html_strings (`str`, `list[str]`):
The HTML string or batch of HTML strings from which to extract nodes and corresponding xpaths.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **nodes** -- Nodes.
- **xpaths** -- Corresponding xpaths.
Examples:
```python
>>> from transformers import MarkupLMFeatureExtractor
>>> page_name_1 = "page1.html"
>>> page_name_2 = "page2.html"
>>> page_name_3 = "page3.html"
>>> with open(page_name_1) as f:
... single_html_string = f.read()
>>> feature_extractor = MarkupLMFeatureExtractor()
>>> # single example
>>> encoding = feature_extractor(single_html_string)
>>> print(encoding.keys())
>>> # dict_keys(['nodes', 'xpaths'])
>>> # batched example
>>> multi_html_strings = []
>>> with open(page_name_2) as f:
... multi_html_strings.append(f.read())
>>> with open(page_name_3) as f:
... multi_html_strings.append(f.read())
>>> encoding = feature_extractor(multi_html_strings)
>>> print(encoding.keys())
>>> # dict_keys(['nodes', 'xpaths'])
```'''
pass
total_program_units: 6 | total_doc_str: 2
AvgCountLine: 27 | AvgCountLineBlank: 6 | AvgCountLineCode: 15 | AvgCountLineComment: 7 | AvgCyclomatic: 5 | CommentToCodeRatio: 0.58
CountClassBase: 1 | CountClassCoupled: 11 | CountClassCoupledModified: 1 | CountClassDerived: 0
CountDeclInstanceMethod: 5 | CountDeclInstanceVariable: 0 | CountDeclMethod: 5 | CountDeclMethodAll: 17
CountLine: 151 | CountLineBlank: 35 | CountLineCode: 74 | CountLineCodeDecl: 32 | CountLineCodeExe: 68 | CountLineComment: 43
CountStmt: 68 | CountStmtDecl: 31 | CountStmtExe: 62
MaxCyclomatic: 8 | MaxInheritanceTree: 2 | MaxNesting: 3 | SumCyclomatic: 23
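The `construct_xpath` method in the row above joins tag names and their sibling subscripts into an xpath string; a subscript of 0 means the tag had no same-named siblings. A standalone copy of that logic on a toy input:

```python
# Standalone sketch of MarkupLMFeatureExtractor.construct_xpath:
# each tag contributes "/tag", with "[n]" appended when the sibling subscript is non-zero.
def construct_xpath(xpath_tags, xpath_subscripts):
    xpath = ""
    for tagname, subs in zip(xpath_tags, xpath_subscripts):
        xpath += f"/{tagname}"
        if subs != 0:
            xpath += f"[{subs}]"
    return xpath

print(construct_xpath(["html", "body", "div", "p"], [0, 0, 2, 1]))
# /html/body/div[2]/p[1]
```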
id: 3,603
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py
class_name: transformers.models.markuplm.modeling_markuplm.MarkupLMAttention
human_written_code:
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional, Union
import torch
from torch import nn
class MarkupLMAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = MarkupLMSelfAttention(config)
self.output = MarkupLMSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
class_skeleton:
class MarkupLMAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
total_program_units: 4 | total_doc_str: 0
AvgCountLine: 15 | AvgCountLineBlank: 1 | AvgCountLineCode: 14 | AvgCountLineComment: 1 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.07
CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 1 | CountClassDerived: 0
CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 3 | CountDeclMethod: 3 | CountDeclMethodAll: 13
CountLine: 49 | CountLineBlank: 4 | CountLineCode: 43 | CountLineCodeDecl: 20 | CountLineCodeExe: 30 | CountLineComment: 3
CountStmt: 22 | CountStmtDecl: 11 | CountStmtExe: 18
MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
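In `prune_heads` above, `all_head_size` is recomputed as `attention_head_size * remaining heads` after pruning. A small arithmetic sketch with assumed BERT-base-style sizes (the numbers are illustrative, not taken from a specific config):

```python
# Bookkeeping done by MarkupLMAttention.prune_heads, on assumed example sizes.
num_attention_heads = 12      # assumed: BERT-base-style head count
attention_head_size = 64      # assumed: hidden_size 768 / 12 heads
pruned_heads = {0, 5}         # heads being removed

remaining = num_attention_heads - len(pruned_heads)
all_head_size = attention_head_size * remaining
print(remaining, all_head_size)  # 10 640
```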
id: 3,604
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py
class_name: transformers.models.markuplm.modeling_markuplm.MarkupLMEmbeddings
human_written_code:
from torch import nn
import torch
class MarkupLMEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.config = config
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.max_depth = config.max_depth
self.xpath_embeddings = XPathEmbeddings(config)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
def forward(self, input_ids=None, xpath_tags_seq=None, xpath_subs_seq=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if xpath_tags_seq is None:
xpath_tags_seq = self.config.tag_pad_id * torch.ones(tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device)
if xpath_subs_seq is None:
xpath_subs_seq = self.config.subs_pad_id * torch.ones(tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device)
words_embeddings = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
embeddings = words_embeddings + position_embeddings + token_type_embeddings + xpath_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class_skeleton:
class MarkupLMEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
'''
pass
def forward(self, input_ids=None, xpath_tags_seq=None, xpath_subs_seq=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass
total_program_units: 7 | total_doc_str: 3
AvgCountLine: 30 | AvgCountLineBlank: 6 | AvgCountLineCode: 21 | AvgCountLineComment: 3 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.15
CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 1 | CountClassDerived: 0
CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 9 | CountDeclMethod: 3 | CountDeclMethodAll: 13
CountLine: 96 | CountLineBlank: 21 | CountLineCode: 65 | CountLineCodeDecl: 32 | CountLineCodeExe: 52 | CountLineComment: 10
CountStmt: 44 | CountStmtDecl: 23 | CountStmtExe: 40
MaxCyclomatic: 9 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 11
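`create_position_ids_from_input_ids` in the row above assigns padding tokens the position `padding_idx` and numbers real tokens from `padding_idx + 1` upward. A standalone copy of that logic on a toy batch:

```python
import torch

# Standalone sketch of MarkupLMEmbeddings.create_position_ids_from_input_ids:
# padding positions keep padding_idx, real tokens count up from padding_idx + 1.
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[5, 7, 9, 0, 0]])  # 0 is the padding id in this toy example
print(create_position_ids_from_input_ids(input_ids, padding_idx=0))
# tensor([[1, 2, 3, 0, 0]])
```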
id: 3,605
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py
class_name: transformers.models.markuplm.modeling_markuplm.MarkupLMEncoder
human_written_code:
from ...utils import auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
class MarkupLMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([MarkupLMLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, **kwargs)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class_skeleton:
class MarkupLMEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]:
pass
total_program_units: 4 | total_doc_str: 0
AvgCountLine: 45 | AvgCountLineBlank: 4 | AvgCountLineCode: 41 | AvgCountLineComment: 0 | AvgCyclomatic: 9 | CommentToCodeRatio: 0
CountClassBase: 1 | CountClassCoupled: 8 | CountClassCoupledModified: 2 | CountClassDerived: 0
CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12
CountLine: 91 | CountLineBlank: 8 | CountLineCode: 83 | CountLineCodeDecl: 26 | CountLineCodeExe: 68 | CountLineComment: 0
CountStmt: 35 | CountStmtDecl: 14 | CountStmtExe: 32
MaxCyclomatic: 17 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 18
id: 3,606
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py
class_name: transformers.models.markuplm.modeling_markuplm.MarkupLMForQuestionAnswering
human_written_code:
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from ...utils import auto_docstring, can_return_tuple, logging
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Callable, Optional, Union
@auto_docstring
class MarkupLMForQuestionAnswering(MarkupLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, xpath_tags_seq: Optional[torch.Tensor]=None, xpath_subs_seq: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
"""
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
Examples:
```python
>>> from transformers import AutoProcessor, MarkupLMForQuestionAnswering
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
>>> model = MarkupLMForQuestionAnswering.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
>>> html_string = "<html> <head> <title>My name is Niels</title> </head> </html>"
>>> question = "What's his name?"
>>> encoding = processor(html_string, questions=question, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
>>> processor.decode(predict_answer_tokens).strip()
'Niels'
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring
class MarkupLMForQuestionAnswering(MarkupLMPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, xpath_tags_seq: Optional[torch.Tensor]=None, xpath_subs_seq: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
'''
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
Examples:
```python
>>> from transformers import AutoProcessor, MarkupLMForQuestionAnswering
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
>>> model = MarkupLMForQuestionAnswering.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
>>> html_string = "<html> <head> <title>My name is Niels</title> </head> </html>"
>>> question = "What's his name?"
>>> encoding = processor(html_string, questions=question, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
>>> processor.decode(predict_answer_tokens).strip()
'Niels'
```'''
pass
total_program_units: 6 | total_doc_str: 1
AvgCountLine: 56 | AvgCountLineBlank: 9 | AvgCountLineCode: 32 | AvgCountLineComment: 15 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.46
CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 2 | CountClassDerived: 0
CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 4
CountLine: 117 | CountLineBlank: 19 | CountLineCode: 67 | CountLineCodeDecl: 32 | CountLineCodeExe: 47 | CountLineComment: 31
CountStmt: 32 | CountStmtDecl: 16 | CountStmtExe: 29
MaxCyclomatic: 7 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 8
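The QA docstring example above extracts the answer span by taking the argmax of the start and end logits and slicing the input ids. The same step on toy tensors (the logits and token ids here are placeholders, not model outputs):

```python
import torch

# Span extraction as done in the MarkupLMForQuestionAnswering docstring example,
# demonstrated on made-up logits and placeholder token ids.
start_logits = torch.tensor([[0.1, 0.2, 3.0, 0.1, 0.1]])
end_logits = torch.tensor([[0.1, 0.1, 0.2, 2.5, 0.1]])
input_ids = torch.tensor([[101, 2054, 2003, 2032, 102]])

answer_start_index = start_logits.argmax()
answer_end_index = end_logits.argmax()
predict_answer_tokens = input_ids[0, answer_start_index : answer_end_index + 1]
print(predict_answer_tokens)  # tensor([2003, 2032])
```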
id: 3,607
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py
class_name: transformers.models.markuplm.modeling_markuplm.MarkupLMForSequenceClassification
human_written_code:
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from typing import Callable, Optional, Union
from ...utils import auto_docstring, can_return_tuple, logging
import torch
@auto_docstring(custom_intro='\n MarkupLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class MarkupLMForSequenceClassification(MarkupLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.markuplm = MarkupLMModel(config)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, xpath_tags_seq: Optional[torch.Tensor]=None, xpath_subs_seq: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForSequenceClassification
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
>>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
>>> encoding = processor(html_string, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n MarkupLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class MarkupLMForSequenceClassification(MarkupLMPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, xpath_tags_seq: Optional[torch.Tensor]=None, xpath_subs_seq: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForSequenceClassification
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
>>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
>>> encoding = processor(html_string, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 6 | 1 | 54 | 8 | 37 | 10 | 7 | 0.28 | 1 | 6 | 2 | 0 | 2 | 5 | 2 | 4 | 113 | 16 | 76 | 30 | 57 | 21 | 36 | 15 | 33 | 12 | 2 | 3 | 14 |
3,608 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMForTokenClassification |
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, can_return_tuple, logging
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n MarkupLM Model with a `token_classification` head on top.\n ')
class MarkupLMForTokenClassification(MarkupLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, xpath_tags_seq: Optional[torch.Tensor]=None, xpath_subs_seq: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
"""
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForTokenClassification
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> processor.parse_html = False
>>> model = AutoModelForTokenClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
>>> nodes = ["hello", "world"]
>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
>>> node_labels = [1, 2]
>>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = outputs[0]
prediction_scores = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(prediction_scores.view(-1, self.config.num_labels), labels.view(-1))
return TokenClassifierOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n MarkupLM Model with a `token_classification` head on top.\n ')
class MarkupLMForTokenClassification(MarkupLMPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, xpath_tags_seq: Optional[torch.Tensor]=None, xpath_subs_seq: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
'''
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForTokenClassification
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> processor.parse_html = False
>>> model = AutoModelForTokenClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
>>> nodes = ["hello", "world"]
>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
>>> node_labels = [1, 2]
>>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 6 | 1 | 46 | 7 | 29 | 11 | 4 | 0.38 | 1 | 6 | 3 | 0 | 2 | 4 | 2 | 4 | 97 | 15 | 60 | 29 | 41 | 23 | 22 | 14 | 19 | 5 | 2 | 1 | 7 |
3,609 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMIntermediate |
from ...activations import ACT2FN
from torch import nn
import torch
class MarkupLMIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class MarkupLMIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
3,610 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMLMPredictionHead |
import torch
from torch import nn
class MarkupLMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = MarkupLMPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
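# The bias is kept as a separate Parameter and linked to the decoder both here and in
# _tie_weights, so it stays attached to the output projection when the embeddings are tied or resized.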
def _tie_weights(self):
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
|
class MarkupLMLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def _tie_weights(self):
pass
def forward(self, hidden_states):
pass
| 4 | 0 | 6 | 1 | 4 | 1 | 1 | 0.23 | 1 | 2 | 1 | 0 | 3 | 3 | 3 | 13 | 21 | 5 | 13 | 7 | 9 | 3 | 13 | 7 | 9 | 1 | 1 | 0 | 3 |
3,611 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMLayer |
from typing import Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from ...modeling_layers import GradientCheckpointingLayer
class MarkupLMLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = MarkupLMAttention(config)
self.intermediate = MarkupLMIntermediate(config)
self.output = MarkupLMOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
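# The feed-forward block runs through apply_chunking_to_forward: the sequence dimension
# (seq_len_dim=1) is processed in chunks of config.chunk_size_feed_forward to limit peak memory;
# a chunk size of 0 processes the whole sequence in one pass.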
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class MarkupLMLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4 | 0 | 27 | 2 | 23 | 2 | 4 | 0.1 | 1 | 7 | 3 | 0 | 3 | 8 | 3 | 13 | 84 | 9 | 70 | 32 | 57 | 7 | 41 | 23 | 37 | 7 | 1 | 2 | 11 |
3,612 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMModel |
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
@auto_docstring
class MarkupLMModel(MarkupLMPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = MarkupLMEmbeddings(config)
self.encoder = MarkupLMEncoder(config)
self.pooler = MarkupLMPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, xpath_tags_seq: Optional[torch.LongTensor]=None, xpath_subs_seq: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
Examples:
```python
>>> from transformers import AutoProcessor, MarkupLMModel
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> model = MarkupLMModel.from_pretrained("microsoft/markuplm-base")
>>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
>>> encoding = processor(html_string, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 768]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
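# The 0/1 attention mask is broadcast to (batch_size, 1, 1, seq_len) and converted into an additive
# bias: 0.0 for positions to attend to and -10000.0 for masked positions, which effectively removes
# them after the softmax.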
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids=input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class MarkupLMModel(MarkupLMPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, xpath_tags_seq: Optional[torch.LongTensor]=None, xpath_subs_seq: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
Examples:
```python
>>> from transformers import AutoProcessor, MarkupLMModel
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> model = MarkupLMModel.from_pretrained("microsoft/markuplm-base")
>>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
>>> encoding = processor(html_string, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 768]
```'''
pass
| 9 | 3 | 22 | 3 | 16 | 3 | 4 | 0.22 | 1 | 8 | 4 | 0 | 6 | 4 | 6 | 8 | 141 | 24 | 96 | 35 | 74 | 21 | 52 | 21 | 45 | 15 | 2 | 2 | 23 |
3,613 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMOnlyMLMHead |
import torch
from torch import nn
class MarkupLMOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MarkupLMLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class MarkupLMOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 12 | 8 | 1 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
3,614 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMOutput |
from torch import nn
import torch
class MarkupLMOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class MarkupLMOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,615 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMPooler |
from torch import nn
import torch
class MarkupLMPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class MarkupLMPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2 |
3,616 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMPreTrainedModel |
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import os
from ...utils import auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
from .configuration_markuplm import MarkupLMConfig
@auto_docstring
class MarkupLMPreTrainedModel(PreTrainedModel):
config: MarkupLMConfig
base_model_prefix = 'markuplm'
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, MarkupLMLMPredictionHead):
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
|
@auto_docstring
class MarkupLMPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
pass
| 5 | 1 | 10 | 0 | 8 | 2 | 4 | 0.4 | 1 | 3 | 0 | 4 | 1 | 0 | 2 | 2 | 31 | 3 | 20 | 6 | 16 | 8 | 15 | 5 | 12 | 6 | 1 | 2 | 7 |
3,617 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMPredictionHeadTransform |
from torch import nn
from ...activations import ACT2FN
import torch
class MarkupLMPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class MarkupLMPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 3 | 2 | 12 | 15 | 1 | 14 | 6 | 11 | 0 | 13 | 6 | 10 | 2 | 1 | 1 | 3 |
3,618 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMSelfAttention |
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
class MarkupLMSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.attention_dropout = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
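# query/key/value are reshaped to (batch_size, num_attention_heads, seq_len, attention_head_size)
# before dispatching to either the eager attention path or the implementation selected via
# config._attn_implementation.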
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, head_mask=head_mask, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
|
class MarkupLMSelfAttention(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]:
pass
| 3 | 0 | 43 | 7 | 31 | 6 | 6 | 0.19 | 1 | 5 | 0 | 0 | 3 | 11 | 3 | 13 | 132 | 22 | 93 | 44 | 80 | 18 | 72 | 35 | 68 | 13 | 1 | 2 | 17 |
3,619 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.MarkupLMSelfOutput |
from torch import nn
import torch
class MarkupLMSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class MarkupLMSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,620 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/modeling_markuplm.py | transformers.models.markuplm.modeling_markuplm.XPathEmbeddings |
import torch
from torch import nn
class XPathEmbeddings(nn.Module):
"""Construct the embeddings from xpath tags and subscripts.
We drop tree-id in this version, as its info can be covered by xpath.
"""
def __init__(self, config):
super().__init__()
self.max_depth = config.max_depth
self.xpath_unitseq2_embeddings = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.activation = nn.ReLU()
self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size)
self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size)
self.xpath_tag_sub_embeddings = nn.ModuleList([nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size) for _ in range(self.max_depth)])
self.xpath_subs_sub_embeddings = nn.ModuleList([nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size) for _ in range(self.max_depth)])
def forward(self, xpath_tags_seq=None, xpath_subs_seq=None):
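# Shape summary: xpath_tags_seq and xpath_subs_seq are (batch_size, seq_len, max_depth); each depth
# level is embedded to xpath_unit_hidden_size and concatenated into
# (batch_size, seq_len, max_depth * xpath_unit_hidden_size), then projected through
# xpath_unitseq2_inner -> ReLU -> dropout -> inner2emb to (batch_size, seq_len, hidden_size).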
xpath_tags_embeddings = []
xpath_subs_embeddings = []
for i in range(self.max_depth):
xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i]))
xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i]))
xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1)
xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1)
xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings
xpath_embeddings = self.inner2emb(self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings))))
return xpath_embeddings
|
class XPathEmbeddings(nn.Module):
'''Construct the embeddings from xpath tags and subscripts.
We drop tree-id in this version, as its info can be covered by xpath.
'''
def __init__(self, config):
pass
def forward(self, xpath_tags_seq=None, xpath_subs_seq=None):
pass
| 3 | 1 | 21 | 5 | 16 | 0 | 2 | 0.09 | 1 | 2 | 0 | 0 | 2 | 8 | 2 | 12 | 48 | 13 | 32 | 16 | 29 | 3 | 22 | 15 | 19 | 2 | 1 | 1 | 3 |
3,621 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/processing_markuplm.py | transformers.models.markuplm.processing_markuplm.MarkupLMProcessor |
from ...processing_utils import ProcessorMixin
from typing import Optional, Union
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TruncationStrategy
from ...file_utils import TensorType
class MarkupLMProcessor(ProcessorMixin):
"""
Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single
processor.
[`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model.
It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings.
Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level
`input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`.
Args:
feature_extractor (`MarkupLMFeatureExtractor`):
An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`):
An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input.
parse_html (`bool`, *optional*, defaults to `True`):
Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths.
"""
feature_extractor_class = 'MarkupLMFeatureExtractor'
tokenizer_class = ('MarkupLMTokenizer', 'MarkupLMTokenizerFast')
parse_html = True
def __call__(self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
"""
This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it
passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and
returns the output.
Optionally, one can also provide a `text` argument which is passed along as first sequence.
Please refer to the docstring of the above two methods for more information.
"""
if self.parse_html:
if html_strings is None:
raise ValueError('Make sure to pass HTML strings in case `parse_html` is set to `True`')
if nodes is not None or xpaths is not None or node_labels is not None:
raise ValueError("Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`")
features = self.feature_extractor(html_strings)
nodes = features['nodes']
xpaths = features['xpaths']
else:
if html_strings is not None:
raise ValueError('You have passed HTML strings but `parse_html` is set to `False`.')
if nodes is None or xpaths is None:
raise ValueError('Make sure to pass nodes and xpaths in case `parse_html` is set to `False`')
if questions is not None and self.parse_html:
if isinstance(questions, str):
questions = [questions]
encoded_inputs = self.tokenizer(text=questions if questions is not None else nodes, text_pair=nodes if questions is not None else None, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
return encoded_inputs
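# Illustrative usage sketch (not part of the original source; the variable names are assumptions):
#   processor = MarkupLMProcessor(feature_extractor, tokenizer)  # parse_html defaults to True
#   encoding = processor(html_strings="<html><body>hello</body></html>", return_tensors="pt")
#   # -> input_ids, attention_mask, token_type_ids, xpath_tags_seq, xpath_subs_seq
#   processor.parse_html = False
#   encoding = processor(nodes=["hello", "world"], xpaths=["/html/body/div", "/html/body/div"], return_tensors="pt")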
|
class MarkupLMProcessor(ProcessorMixin):
'''
Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single
processor.
[`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model.
It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings.
Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level
`input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`.
Args:
feature_extractor (`MarkupLMFeatureExtractor`):
An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`):
An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input.
parse_html (`bool`, *optional*, defaults to `True`):
Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths.
'''
def __call__(self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:
'''
This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it
passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and
returns the output.
Optionally, one can also provide a `text` argument which is passed along as first sequence.
Please refer to the docstring of the above two methods for more information.
'''
pass
| 2 | 2 | 24 | 2 | 18 | 5 | 3 | 0.44 | 1 | 6 | 2 | 0 | 4 | 0 | 4 | 21 | 122 | 15 | 75 | 34 | 47 | 33 | 29 | 11 | 24 | 10 | 2 | 2 | 13 |
3,622 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/tokenization_markuplm.py | transformers.models.markuplm.tokenization_markuplm.MarkupLMTokenizer |
import regex as re
from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
import os
from ...tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy
import json
from typing import Optional, Union
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
class MarkupLMTokenizer(PreTrainedTokenizer):
"""
Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). [`MarkupLMTokenizer`] can be used to
turn HTML strings into to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and
`xpath_tags_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, merges_file, tags_dict, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_depth=50, max_width=1000, pad_width=1001, pad_token_label=-100, only_label_first_subword=True, **kwargs):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.tags_dict = tags_dict
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
self.max_depth = max_depth
self.max_width = max_width
self.pad_width = pad_width
self.unk_tag_id = len(self.tags_dict)
self.pad_tag_id = self.unk_tag_id + 1
self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth
self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth
super().__init__(vocab_file=vocab_file, merges_file=merges_file, tags_dict=tags_dict, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, max_depth=max_depth, max_width=max_width, pad_width=pad_width, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
def get_xpath_seq(self, xpath):
"""
Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
tag IDs and corresponding subscripts, taking into account max depth.
"""
xpath_tags_list = []
xpath_subs_list = []
xpath_units = xpath.split('/')
for unit in xpath_units:
if not unit.strip():
continue
name_subs = unit.strip().split('[')
tag_name = name_subs[0]
sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1])
xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
xpath_subs_list.append(min(self.max_width, sub))
xpath_tags_list = xpath_tags_list[:self.max_depth]
xpath_subs_list = xpath_subs_list[:self.max_depth]
xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list))
xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list))
return (xpath_tags_list, xpath_subs_list)
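# Illustrative sketch (the tag IDs are hypothetical, they depend on tags_dict): with
# tags_dict = {"html": 0, "body": 1, "div": 2, "li": 3, "span": 4} and max_depth=50,
# get_xpath_seq("/html/body/div/li[1]/div/span[2]") returns
#   xpath_tags_list = [0, 1, 2, 3, 2, 4] + [pad_tag_id] * 44
#   xpath_subs_list = [0, 0, 0, 1, 0, 2] + [pad_width] * 44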
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
vocab = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
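# Standard byte-level BPE merging: starting from the individual characters of `token`, the
# lowest-ranked adjacent pair in bpe_ranks is merged repeatedly until no ranked pair remains;
# the resulting subwords are cached and returned space-separated.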
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
logger.warning('MarkupLM now does not support generative tasks, decoding is experimental and subject to change.')
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())):
text = ' ' + text
return (text, kwargs)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def build_xpath_tags_with_special_tokens(self, xpath_tags_0: list[int], xpath_tags_1: Optional[list[int]]=None) -> list[int]:
pad = [self.pad_xpath_tags_seq]
if len(xpath_tags_1) == 0:
return pad + xpath_tags_0 + pad
return pad + xpath_tags_0 + pad + xpath_tags_1 + pad
def build_xpath_subs_with_special_tokens(self, xpath_subs_0: list[int], xpath_subs_1: Optional[list[int]]=None) -> list[int]:
pad = [self.pad_xpath_subs_seq]
if len(xpath_subs_1) == 0:
return pad + xpath_subs_0 + pad
return pad + xpath_subs_0 + pad + xpath_subs_1 + pad
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, xpaths: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with node-level xpaths and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(nodes of a single example or questions of a batch of examples) or a list of list of strings (batch of
nodes).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
xpaths (`list[list[int]]`, `list[list[list[int]]]`):
Node-level xpaths.
node_labels (`list[int]`, `list[list[int]]`, *optional*):
Node-level integer labels (for token classification tasks).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must of type `str` (single example) or `list[str]` (batch of examples). ')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('Nodes must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Nodes must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
nodes = text if text_pair is None else text_pair
assert xpaths is not None, 'You must provide corresponding xpaths'
if is_batched:
assert len(nodes) == len(xpaths), 'You must provide nodes and xpaths for an equal amount of examples'
for nodes_example, xpaths_example in zip(nodes, xpaths):
assert len(nodes_example) == len(xpaths_example), 'You must provide as many nodes as there are xpaths'
else:
assert len(nodes) == len(xpaths), 'You must provide as many nodes as there are xpaths'
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
batch_outputs = self._batch_prepare_for_model(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose)
return BatchEncoding(batch_outputs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
"""
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, xpaths)):
batch_text_or_text_pair, xpaths_example = example
outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, xpaths_example, node_labels=node_labels[idx] if node_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
def encode(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> list[int]:
encoded_inputs = self.encode_plus(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
return encoded_inputs['input_ids']
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
list of list of strings (nodes of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, xpaths=xpaths, text_pair=text_pair, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674')
return self.prepare_for_model(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
"""
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
*truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
combination of arguments will raise an error.
Node-level `xpaths` are turned into token-level `xpath_tags_seq` and `xpath_subs_seq`. If provided, node-level
`node_labels` are turned into token-level `labels`. The node label is used for the first token of the node,
while remaining tokens are labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
list of list of strings (nodes of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
tokens = []
pair_tokens = []
xpath_tags_seq = []
xpath_subs_seq = []
pair_xpath_tags_seq = []
pair_xpath_subs_seq = []
labels = []
if text_pair is None:
if node_labels is None:
for word, xpath in zip(text, xpaths):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
else:
for word, xpath, label in zip(text, xpaths, node_labels):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
tokens.extend(word_tokens)
xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
if self.only_label_first_subword:
labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
else:
labels.extend([label] * len(word_tokens))
else:
tokens = self.tokenize(text)
xpath_tags_seq = [self.pad_xpath_tags_seq for _ in range(len(tokens))]
xpath_subs_seq = [self.pad_xpath_subs_seq for _ in range(len(tokens))]
for word, xpath in zip(text_pair, xpaths):
if len(word) < 1:
continue
word_tokens = self.tokenize(word)
pair_tokens.extend(word_tokens)
xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
pair_xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
pair_xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
ids = self.convert_tokens_to_ids(tokens)
pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (pair_ids is not None):
raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.')
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
overflowing_tokens = []
overflowing_xpath_tags_seq = []
overflowing_xpath_subs_seq = []
overflowing_labels = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):
ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, labels, overflowing_tokens, overflowing_xpath_tags_seq, overflowing_xpath_subs_seq, overflowing_labels = self.truncate_sequences(ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, pair_ids=pair_ids, pair_xpath_tags_seq=pair_xpath_tags_seq, pair_xpath_subs_seq=pair_xpath_subs_seq, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride)
if return_token_type_ids and (not add_special_tokens):
raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
if return_token_type_ids is None:
return_token_type_ids = 'token_type_ids' in self.model_input_names
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
encoded_inputs = {}
if return_overflowing_tokens:
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['overflowing_xpath_tags_seq'] = overflowing_xpath_tags_seq
encoded_inputs['overflowing_xpath_subs_seq'] = overflowing_xpath_subs_seq
encoded_inputs['overflowing_labels'] = overflowing_labels
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
xpath_tags_ids = self.build_xpath_tags_with_special_tokens(xpath_tags_seq, pair_xpath_tags_seq)
xpath_subs_ids = self.build_xpath_subs_with_special_tokens(xpath_subs_seq, pair_xpath_subs_seq)
if labels:
labels = [self.pad_token_label] + labels + [self.pad_token_label]
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
xpath_tags_ids = xpath_tags_seq + pair_xpath_tags_seq if pair else xpath_tags_seq
xpath_subs_ids = xpath_subs_seq + pair_xpath_subs_seq if pair else xpath_subs_seq
encoded_inputs['input_ids'] = sequence
encoded_inputs['xpath_tags_seq'] = xpath_tags_ids
encoded_inputs['xpath_subs_seq'] = xpath_subs_ids
if return_token_type_ids:
encoded_inputs['token_type_ids'] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs['special_tokens_mask'] = [0] * len(sequence)
if labels:
encoded_inputs['labels'] = labels
self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
if return_length:
encoded_inputs['length'] = len(encoded_inputs['input_ids'])
batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
return batch_outputs
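# Sketch of the node-to-token expansion performed above (illustrative only; the subword split is
# hypothetical). With `only_label_first_subword=True`, a node labelled 1 that is tokenized into
# three subwords contributes the token-level labels [1, -100, -100], and its xpath tag/subscript
# lists are repeated once per subword:
#
#     enc = tokenizer(["skateboarding"], xpaths=["/html/body/div/span[2]"], node_labels=[1])
#     # assuming "skateboarding" splits into 3 subword tokens:
#     # enc["labels"]         -> [-100, 1, -100, -100, -100]  (special tokens get pad_token_label)
#     # enc["xpath_tags_seq"] -> one max_depth-long tag-id list per token in enc["input_ids"]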
def truncate_sequences(self, ids: list[int], xpath_tags_seq: list[list[int]], xpath_subs_seq: list[list[int]], pair_ids: Optional[list[int]]=None, pair_xpath_tags_seq: Optional[list[list[int]]]=None, pair_xpath_subs_seq: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
"""
Truncates a sequence pair in-place following the strategy.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
xpath_tags_seq (`list[list[int]]`):
XPath tag IDs of the first sequence.
xpath_subs_seq (`list[list[int]]`):
XPath sub IDs of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_xpath_tags_seq (`list[list[int]]`, *optional*):
XPath tag IDs of the second sequence.
pair_xpath_subs_seq (`list[list[int]]`, *optional*):
XPath sub IDs of the second sequence.
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to
`'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
"""
if num_tokens_to_remove <= 0:
return (ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, [], [], [])
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
overflowing_xpath_tags_seq = []
overflowing_xpath_subs_seq = []
overflowing_labels = []
if truncation_strategy == TruncationStrategy.ONLY_FIRST or (truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None):
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
overflowing_xpath_tags_seq = xpath_tags_seq[-window_len:]
overflowing_xpath_subs_seq = xpath_subs_seq[-window_len:]
ids = ids[:-num_tokens_to_remove]
xpath_tags_seq = xpath_tags_seq[:-num_tokens_to_remove]
xpath_subs_seq = xpath_subs_seq[:-num_tokens_to_remove]
labels = labels[:-num_tokens_to_remove]
else:
error_msg = f'We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a length {len(ids)}. '
if truncation_strategy == TruncationStrategy.ONLY_FIRST:
error_msg = error_msg + f"Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'."
logger.error(error_msg)
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
logger.warning(f"Be aware, overflowing tokens are not returned for the setting you have chosen, i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' truncation strategy. So the returned list will always be empty even if some tokens have been removed.")
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
ids = ids[:-1]
xpath_tags_seq = xpath_tags_seq[:-1]
xpath_subs_seq = xpath_subs_seq[:-1]
labels = labels[:-1]
else:
pair_ids = pair_ids[:-1]
pair_xpath_tags_seq = pair_xpath_tags_seq[:-1]
pair_xpath_subs_seq = pair_xpath_subs_seq[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
overflowing_xpath_tags_seq = pair_xpath_tags_seq[-window_len:]
overflowing_xpath_subs_seq = pair_xpath_subs_seq[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
pair_xpath_tags_seq = pair_xpath_tags_seq[:-num_tokens_to_remove]
pair_xpath_subs_seq = pair_xpath_subs_seq[:-num_tokens_to_remove]
else:
logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.")
return (ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, labels, overflowing_tokens, overflowing_xpath_tags_seq, overflowing_xpath_subs_seq, overflowing_labels)
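# Hedged illustration of the truncation contract above (token ids are hypothetical). With
# `only_second` and `num_tokens_to_remove=2`, only the pair sequence and its xpath sequences are
# shortened, while `stride` extra tokens are kept in the overflow window:
#
#     ids, pair_ids = [10, 11, 12], [20, 21, 22, 23]
#     # truncate_sequences(ids, ..., pair_ids=pair_ids, num_tokens_to_remove=2,
#     #                    truncation_strategy="only_second", stride=1)
#     # -> pair_ids becomes [20, 21]; the overflowing tokens are [21, 22, 23]
#     #    (a window of stride + num_tokens_to_remove = 3 tokens taken from the end).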
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch).
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'xpath_tags_seq' in encoded_inputs:
encoded_inputs['xpath_tags_seq'] = encoded_inputs['xpath_tags_seq'] + [self.pad_xpath_tags_seq] * difference
if 'xpath_subs_seq' in encoded_inputs:
encoded_inputs['xpath_subs_seq'] = encoded_inputs['xpath_subs_seq'] + [self.pad_xpath_subs_seq] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'xpath_tags_seq' in encoded_inputs:
encoded_inputs['xpath_tags_seq'] = [self.pad_xpath_tags_seq] * difference + encoded_inputs['xpath_tags_seq']
if 'xpath_subs_seq' in encoded_inputs:
encoded_inputs['xpath_subs_seq'] = [self.pad_xpath_subs_seq] * difference + encoded_inputs['xpath_subs_seq']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding side: ' + str(padding_side))
return encoded_inputs
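# Editorial note with hypothetical values: unlike plain token ids, every padded position of
# `xpath_tags_seq` / `xpath_subs_seq` receives a full max_depth-long pad vector
# (`self.pad_xpath_tags_seq` / `self.pad_xpath_subs_seq`). After right padding a 4-token example
# to length 6 one would expect roughly:
#
#     input_ids      -> [cls_id, id_1, id_2, sep_id, pad_token_id, pad_token_id]
#     attention_mask -> [1, 1, 1, 1, 0, 0]
#     xpath_tags_seq -> 4 real tag-id lists followed by 2 copies of pad_xpath_tags_seq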
|
class MarkupLMTokenizer(PreTrainedTokenizer):
'''
Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). [`MarkupLMTokenizer`] can be used to
turn HTML strings into token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and
`xpath_subs_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like any
other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
'''
def __init__(self, vocab_file, merges_file, tags_dict, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_depth=50, max_width=1000, pad_width=1001, pad_token_label=-100, only_label_first_subword=True, **kwargs):
pass
def get_xpath_seq(self, xpath):
'''
Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
tag IDs and corresponding subscripts, taking into account max depth.
'''
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def _tokenize(self, text):
'''Tokenize a string.'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def build_xpath_tags_with_special_tokens(self, xpath_tags_0: list[int], xpath_tags_1: Optional[list[int]]=None) -> list[int]:
pass
def build_xpath_subs_with_special_tokens(self, xpath_subs_0: list[int], xpath_subs_1: Optional[list[int]]=None) -> list[int]:
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, xpaths: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with node-level xpaths and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(nodes of a single example or questions of a batch of examples) or a list of list of strings (batch of
nodes).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
xpaths (`list[list[int]]`, `list[list[list[int]]]`):
Node-level xpaths.
node_labels (`list[int]`, `list[list[int]]`, *optional*):
Node-level integer labels (for token classification tasks).
'''
pass
def _is_valid_text_input(t):
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
'''
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
'''
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
def encode(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> list[int]:
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
list of list of strings (nodes of a batch of examples).
'''
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
'''
Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
truncates sequences if overflowing while taking into account the special tokens and manages a moving window
(with user defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
*truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
combination of arguments will raise an error.
Node-level `xpaths` are turned into token-level `xpath_tags_seq` and `xpath_subs_seq`. If provided, node-level
`node_labels` are turned into token-level `labels`. The node label is used for the first token of the node,
while remaining tokens are labeled with -100, such that they will be ignored by the loss function.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
list of list of strings (nodes of a batch of examples).
'''
pass
def truncate_sequences(self, ids: list[int], xpath_tags_seq: list[list[int]], xpath_subs_seq: list[list[int]], pair_ids: Optional[list[int]]=None, pair_xpath_tags_seq: Optional[list[list[int]]]=None, pair_xpath_subs_seq: Optional[list[list[int]]]=None, labels: Optional[list[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[str, TruncationStrategy]='longest_first', stride: int=0) -> tuple[list[int], list[int], list[int]]:
'''
Truncates a sequence pair in-place following the strategy.
Args:
ids (`list[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
`convert_tokens_to_ids` methods.
xpath_tags_seq (`list[list[int]]`):
XPath tag IDs of the first sequence.
xpath_subs_seq (`list[list[int]]`):
XPath sub IDs of the first sequence.
pair_ids (`list[int]`, *optional*):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
and `convert_tokens_to_ids` methods.
pair_xpath_tags_seq (`list[list[int]]`, *optional*):
XPath tag IDs of the second sequence.
pair_xpath_subs_seq (`list[list[int]]`, *optional*):
XPath sub IDs of the second sequence.
num_tokens_to_remove (`int`, *optional*, defaults to 0):
Number of tokens to remove using the truncation strategy.
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to
`'longest_first'`):
The strategy to follow for truncation. Can be:
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
than the model maximum admissible input size).
stride (`int`, *optional*, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
sequence returned. The value of this argument defines the number of additional tokens.
Returns:
`tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided.
'''
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch).
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
| 35
| 15
| 47
| 3
| 37
| 8
| 5
| 0.25
| 1
| 16
| 2
| 0
| 26
| 19
| 26
| 115
| 1,341
| 108
| 998
| 375
| 730
| 246
| 406
| 130
| 378
| 31
| 3
| 4
| 142
|
3,623
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/markuplm/tokenization_markuplm_fast.py
|
transformers.models.markuplm.tokenization_markuplm_fast.MarkupLMTokenizerFast
|
from tokenizers import processors
from .tokenization_markuplm import MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, MarkupLMTokenizer
from typing import Optional, Union
from ...tokenization_utils_fast import PreTrainedTokenizerFast
import json
from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
from ...tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy
class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
[`MarkupLMTokenizerFast`] can be used to turn HTML strings into token-level `input_ids`, `attention_mask`,
`token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which
contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like any
other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = MarkupLMTokenizer
def __init__(self, vocab_file, merges_file, tags_dict, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_depth=50, max_width=1000, pad_width=1001, pad_token_label=-100, only_label_first_subword=True, trim_offsets=False, **kwargs):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(vocab_file=vocab_file, merges_file=merges_file, tags_dict=tags_dict, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, max_depth=max_depth, max_width=max_width, pad_width=pad_width, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)
if trim_offsets:
raise NotImplementedError('`trim_offsets=True` is not implemented for MarkupLMTokenizerFast. Please set it to False.')
self.tags_dict = tags_dict
tokenizer_component = 'post_processor'
tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__())
if 'sep' in state:
state['sep'] = tuple(state['sep'])
if 'cls' in state:
state['cls'] = tuple(state['cls'])
changes_to_apply = False
if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
state['add_prefix_space'] = add_prefix_space
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors, state.pop('type'))
new_value = component_class(**state)
setattr(self.backend_tokenizer, tokenizer_component, new_value)
self.max_depth = max_depth
self.max_width = max_width
self.pad_width = pad_width
self.unk_tag_id = len(self.tags_dict)
self.pad_tag_id = self.unk_tag_id + 1
self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth
self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
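# Small sketch of the derived padding constants above (the tags_dict contents are user supplied
# and purely illustrative here): with tags_dict = {"html": 0, "body": 1, "div": 2} and
# max_depth = 50, unk_tag_id == 3, pad_tag_id == 4, pad_xpath_tags_seq == [4] * 50 and
# pad_xpath_subs_seq == [pad_width] * 50.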
def get_xpath_seq(self, xpath):
"""
Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
tag IDs and corresponding subscripts, taking into account max depth.
"""
xpath_tags_list = []
xpath_subs_list = []
xpath_units = xpath.split('/')
for unit in xpath_units:
if not unit.strip():
continue
name_subs = unit.strip().split('[')
tag_name = name_subs[0]
sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1])
xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
xpath_subs_list.append(min(self.max_width, sub))
xpath_tags_list = xpath_tags_list[:self.max_depth]
xpath_subs_list = xpath_subs_list[:self.max_depth]
xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list))
xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list))
return (xpath_tags_list, xpath_subs_list)
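# Worked example for the parser above (the tag ids depend on the user-supplied `tags_dict`, so
# only the subscripts are fixed): for "/html/body/div/li[1]/div/span[2]", units without a
# bracket get subscript 0 and bracketed units keep their index,
#
#     xpath_tags_list[:6] -> [tags_dict.get(t, unk_tag_id) for t in
#                             ("html", "body", "div", "li", "div", "span")]
#     xpath_subs_list[:6] -> [0, 0, 0, 1, 0, 2]
#
# and both lists are then padded with `pad_tag_id` / `pad_width` up to `max_depth`.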
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, xpaths: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with nodes, xpaths and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
xpaths (`list[list[int]]`, `list[list[list[int]]]`):
Node-level xpaths, i.e. the xpath expression string of each node (for example "/html/body/div/li[1]").
node_labels (`list[int]`, `list[list[int]]`, *optional*):
Node-level integer labels (for token classification tasks).
"""
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if len(t) == 0:
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
if not _is_valid_text_input(text):
raise ValueError('text input must be of type `str` (single example) or `list[str]` (batch of examples).')
if not isinstance(text_pair, (list, tuple)):
raise ValueError('Nodes must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
elif not isinstance(text, (list, tuple)):
raise ValueError('Nodes must be of type `list[str]` (single pretokenized example), or `list[list[str]]` (batch of pretokenized examples).')
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
nodes = text if text_pair is None else text_pair
assert xpaths is not None, 'You must provide corresponding xpaths'
if is_batched:
assert len(nodes) == len(xpaths), 'You must provide nodes and xpaths for an equal amount of examples'
for nodes_example, xpaths_example in zip(nodes, xpaths):
assert len(nodes_example) == len(xpaths_example), 'You must provide as many nodes as there are xpaths'
else:
assert len(nodes) == len(xpaths), 'You must provide as many nodes as there are xpaths'
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs)
return encodings[0].tokens
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, xpaths=xpaths, text_pair=text_pair, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')
self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side)
if is_pair:
batch_text_or_text_pairs = [([text], text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True)
tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if node_labels is not None else return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings]
sanitized_tokens = {}
for key in tokens_and_encodings[0][0]:
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks['input_ids'])
sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping
for input_ids in sanitized_tokens['input_ids']:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
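# For every token of every (possibly overflowing) encoding, attach the xpath tag/subscript sequence of the
# word it came from; the first sequence of a pair (sequence_id == 0) and the special [CLS]/[SEP]/[PAD]
# tokens receive the padding xpath sequences instead.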
xpath_tags_seq = []
xpath_subs_seq = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
xpath_tags_seq_example = []
xpath_subs_seq_example = []
for id, sequence_id, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if is_pair and sequence_id == 0:
xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
else:
xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpaths[original_index][word_id])
xpath_tags_seq_example.extend([xpath_tags_list])
xpath_subs_seq_example.extend([xpath_subs_list])
elif id in [self.cls_token_id, self.sep_token_id, self.pad_token_id]:
xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
else:
raise ValueError('Id not recognized')
xpath_tags_seq.append(xpath_tags_seq_example)
xpath_subs_seq.append(xpath_subs_seq_example)
sanitized_tokens['xpath_tags_seq'] = xpath_tags_seq
sanitized_tokens['xpath_subs_seq'] = xpath_subs_seq
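# Align node-level labels with the produced tokens: with `only_label_first_subword`, only the first
# sub-token of each word (offset starting at 0) keeps the label, while remaining sub-tokens and special
# tokens are assigned `pad_token_label`.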
if node_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens['input_ids'])):
if return_overflowing_tokens:
original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
else:
original_index = batch_index
labels_example = []
for id, offset, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0:
labels_example.append(node_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
else:
labels_example.append(node_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens['labels'] = labels
if not return_offsets_mapping:
del sanitized_tokens['offset_mapping']
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_xpaths = [xpaths]
batched_node_labels = [node_labels] if node_labels is not None else None
batched_output = self._batch_encode_plus(batched_input, is_pair=bool(text_pair is not None), xpaths=batched_xpaths, node_labels=batched_node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
if return_tensors is None and (not return_overflowing_tokens):
batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings)
self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)
return batched_output
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch).
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
if return_attention_mask and 'attention_mask' not in encoded_inputs:
encoded_inputs['attention_mask'] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == 'right':
if return_attention_mask:
encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference
if 'xpath_tags_seq' in encoded_inputs:
encoded_inputs['xpath_tags_seq'] = encoded_inputs['xpath_tags_seq'] + [self.pad_xpath_tags_seq] * difference
if 'xpath_subs_seq' in encoded_inputs:
encoded_inputs['xpath_subs_seq'] = encoded_inputs['xpath_subs_seq'] + [self.pad_xpath_subs_seq] * difference
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == 'left':
if return_attention_mask:
encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
if 'token_type_ids' in encoded_inputs:
encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']
if 'xpath_tags_seq' in encoded_inputs:
encoded_inputs['xpath_tags_seq'] = [self.pad_xpath_tags_seq] * difference + encoded_inputs['xpath_tags_seq']
if 'xpath_subs_seq' in encoded_inputs:
encoded_inputs['xpath_subs_seq'] = [self.pad_xpath_subs_seq] * difference + encoded_inputs['xpath_subs_seq']
if 'labels' in encoded_inputs:
encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']
if 'special_tokens_mask' in encoded_inputs:
encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError('Invalid padding side: ' + str(padding_side))
return encoded_inputs
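# Illustrative sketch (assumed numbers): right-padding a 3-token example to length 5 appends two
# `pad_token_id`s to the input IDs, two 0s to the attention mask, and two `pad_xpath_tags_seq` /
# `pad_xpath_subs_seq` entries (each a list of `max_depth` pad values) to the xpath sequences.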
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A MarkupLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
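# Illustrative sketch (assumed IDs): for token_ids_0 = [10, 11] and token_ids_1 = [20] this returns
# [cls_token_id, 10, 11, sep_token_id, 20, sep_token_id]; with token_ids_1 = None it returns
# [cls_token_id, 10, 11, sep_token_id].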
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
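# Minimal usage sketch (hedged; the checkpoint name and example nodes/xpaths below are assumptions, not
# taken from this file):
# tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base")
# nodes = [["hello", "world"]]
# xpaths = [["/html/body/div", "/html/body/div"]]
# encoding = tokenizer(nodes, xpaths=xpaths, padding="max_length", max_length=8, return_tensors="pt")
# # `encoding` then carries `input_ids`, `attention_mask`, `xpath_tags_seq` and `xpath_subs_seq`.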
|
class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
[`MarkupLMTokenizerFast`] can be used to turn HTML strings into token-level `input_ids`, `attention_mask`,
`token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. This tokenizer inherits from [`PreTrainedTokenizerFast`] which
contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like any
other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
'''
def __init__(self, vocab_file, merges_file, tags_dict, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_depth=50, max_width=1000, pad_width=1001, pad_token_label=-100, only_label_first_subword=True, trim_offsets=False, **kwargs):
pass
def get_xpath_seq(self, xpath):
'''
Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
tag IDs and corresponding subscripts, taking into account max depth.
'''
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]]=None, xpaths: Optional[Union[list[list[int]], list[list[list[int]]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with nodes, xpaths and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
xpaths (`list[list[int]]`, `list[list[list[int]]]`):
Node-level xpaths, i.e. the xpath expression string of each node (for example "/html/body/div/li[1]").
node_labels (`list[int]`, `list[list[int]]`, *optional*):
Node-level integer labels (for token classification tasks).
'''
pass
def _is_valid_text_input(t):
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[Union[list[int], list[list[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]:
pass
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
'''
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
'''
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput]], is_pair: Optional[bool]=None, xpaths: Optional[list[list[list[int]]]]=None, node_labels: Optional[list[list[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
pass
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[list[list[int]]]=None, node_labels: Optional[list[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad encoded inputs (on left/right and up to predefined length or max length in the batch).
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A MarkupLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 17 | 7 | 61 | 4 | 47 | 10 | 7 | 0.27 | 1 | 16 | 2 | 0 | 12 | 10 | 12 | 100 | 844 | 72 | 609 | 234 | 441 | 165 | 232 | 80 | 218 | 22 | 3 | 6 | 93 |
3,624 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/configuration_mask2former.py | transformers.models.mask2former.configuration_mask2former.Mask2FormerConfig |
from typing import Optional
from ..auto import CONFIG_MAPPING
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import verify_backbone_config_arguments
from ...utils import logging
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Mask2FormerModel`]. It is used to instantiate a
Mask2Former model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Mask2Former
[facebook/mask2former-swin-small-coco-instance](https://huggingface.co/facebook/mask2former-swin-small-coco-instance)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Currently, Mask2Former only supports the [Swin Transformer](swin) as backbone.
Args:
backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `SwinConfig()`):
The configuration of the backbone model. If unset, the configuration corresponding to
`swin-base-patch4-window12-384` will be used.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
feature_size (`int`, *optional*, defaults to 256):
The features (channels) of the resulting feature maps.
mask_feature_size (`int`, *optional*, defaults to 256):
The masks' feature size; this value will also be used to specify the Feature Pyramid Network feature
size.
hidden_dim (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers.
encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
Dimension of feedforward network for deformable detr encoder used as part of pixel decoder.
encoder_layers (`int`, *optional*, defaults to 6):
Number of layers in the deformable detr encoder used as part of pixel decoder.
decoder_layers (`int`, *optional*, defaults to 10):
Number of layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
dim_feedforward (`int`, *optional*, defaults to 2048):
Feature dimension in feedforward network for transformer decoder.
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to use pre-LayerNorm or not for transformer decoder.
enforce_input_projection (`bool`, *optional*, defaults to `False`):
Whether to add an input projection 1x1 convolution even if the input channels and hidden dim are identical
in the Transformer decoder.
common_stride (`int`, *optional*, defaults to 4):
Parameter used for determining number of FPN levels used as part of pixel decoder.
ignore_value (`int`, *optional*, defaults to 255):
Category id to be ignored during training.
num_queries (`int`, *optional*, defaults to 100):
Number of queries for the decoder.
no_object_weight (`float`, *optional*, defaults to 0.1):
The weight to apply to the null (no object) class.
class_weight (`float`, *optional*, defaults to 2.0):
The weight for the cross entropy loss.
mask_weight (`float`, *optional*, defaults to 5.0):
The weight for the mask loss.
dice_weight (`float`, *optional*, defaults to 5.0):
The weight for the dice loss.
train_num_points (`int`, *optional*, defaults to 12544):
Number of points used for sampling during loss calculation.
oversample_ratio (`float`, *optional*, defaults to 3.0):
Oversampling parameter used for calculating the number of sampled points.
importance_sample_ratio (`float`, *optional*, defaults to 0.75):
Ratio of points that are sampled via importance sampling.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1.0):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
If `True` [`Mask2FormerForUniversalSegmentationOutput`] will contain the auxiliary losses computed using
the logits from each decoder's stage.
feature_strides (`list[int]`, *optional*, defaults to `[4, 8, 16, 32]`):
Feature strides corresponding to features generated from backbone network.
output_auxiliary_logits (`bool`, *optional*):
Should the model output its `auxiliary_logits` or not.
Examples:
```python
>>> from transformers import Mask2FormerConfig, Mask2FormerModel
>>> # Initializing a Mask2Former facebook/mask2former-swin-small-coco-instance configuration
>>> configuration = Mask2FormerConfig()
>>> # Initializing a model (with random weights) from the facebook/mask2former-swin-small-coco-instance style configuration
>>> model = Mask2FormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'mask2former'
backbones_supported = ['swin']
attribute_map = {'hidden_size': 'hidden_dim'}
def __init__(self, backbone_config: Optional[dict]=None, feature_size: int=256, mask_feature_size: int=256, hidden_dim: int=256, encoder_feedforward_dim: int=1024, activation_function: str='relu', encoder_layers: int=6, decoder_layers: int=10, num_attention_heads: int=8, dropout: float=0.0, dim_feedforward: int=2048, pre_norm: bool=False, enforce_input_projection: bool=False, common_stride: int=4, ignore_value: int=255, num_queries: int=100, no_object_weight: float=0.1, class_weight: float=2.0, mask_weight: float=5.0, dice_weight: float=5.0, train_num_points: int=12544, oversample_ratio: float=3.0, importance_sample_ratio: float=0.75, init_std: float=0.02, init_xavier_std: float=1.0, use_auxiliary_loss: bool=True, feature_strides: list[int]=[4, 8, 16, 32], output_auxiliary_logits: Optional[bool]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, **kwargs):
if backbone_config is None and backbone is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
backbone_config = CONFIG_MAPPING['swin'](image_size=224, num_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.pop('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
if backbone_config is not None and backbone_config.model_type not in self.backbones_supported:
logger.warning_once(f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. Supported model types: {','.join(self.backbones_supported)}")
self.backbone_config = backbone_config
self.feature_size = feature_size
self.mask_feature_size = mask_feature_size
self.hidden_dim = hidden_dim
self.encoder_feedforward_dim = encoder_feedforward_dim
self.activation_function = activation_function
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.dim_feedforward = dim_feedforward
self.pre_norm = pre_norm
self.enforce_input_projection = enforce_input_projection
self.common_stride = common_stride
self.ignore_value = ignore_value
self.num_queries = num_queries
self.no_object_weight = no_object_weight
self.class_weight = class_weight
self.mask_weight = mask_weight
self.dice_weight = dice_weight
self.train_num_points = train_num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.use_auxiliary_loss = use_auxiliary_loss
self.feature_strides = feature_strides
self.output_auxiliary_logits = output_auxiliary_logits
self.num_hidden_layers = decoder_layers
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
super().__init__(**kwargs)
@property
def sub_configs(self):
return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
@classmethod
def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
"""Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
Returns:
[`Mask2FormerConfig`]: An instance of a configuration object
"""
return cls(backbone_config=backbone_config, **kwargs)
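# Minimal usage sketch (hedged; the backbone settings below are illustrative assumptions):
# from transformers import SwinConfig
# backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
# config = Mask2FormerConfig.from_backbone_config(backbone_config, num_queries=100)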
|
class Mask2FormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Mask2FormerModel`]. It is used to instantiate a
Mask2Former model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Mask2Former
[facebook/mask2former-swin-small-coco-instance](https://huggingface.co/facebook/mask2former-swin-small-coco-instance)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Currently, Mask2Former only supports the [Swin Transformer](swin) as backbone.
Args:
backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `SwinConfig()`):
The configuration of the backbone model. If unset, the configuration corresponding to
`swin-base-patch4-window12-384` will be used.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
feature_size (`int`, *optional*, defaults to 256):
The features (channels) of the resulting feature maps.
mask_feature_size (`int`, *optional*, defaults to 256):
The masks' feature size; this value will also be used to specify the Feature Pyramid Network feature
size.
hidden_dim (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers.
encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
Dimension of feedforward network for deformable detr encoder used as part of pixel decoder.
encoder_layers (`int`, *optional*, defaults to 6):
Number of layers in the deformable detr encoder used as part of pixel decoder.
decoder_layers (`int`, *optional*, defaults to 10):
Number of layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
dim_feedforward (`int`, *optional*, defaults to 2048):
Feature dimension in feedforward network for transformer decoder.
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to use pre-LayerNorm or not for transformer decoder.
enforce_input_projection (`bool`, *optional*, defaults to `False`):
Whether to add an input projection 1x1 convolution even if the input channels and hidden dim are identical
in the Transformer decoder.
common_stride (`int`, *optional*, defaults to 4):
Parameter used for determining number of FPN levels used as part of pixel decoder.
ignore_value (`int`, *optional*, defaults to 255):
Category id to be ignored during training.
num_queries (`int`, *optional*, defaults to 100):
Number of queries for the decoder.
no_object_weight (`float`, *optional*, defaults to 0.1):
The weight to apply to the null (no object) class.
class_weight (`float`, *optional*, defaults to 2.0):
The weight for the cross entropy loss.
mask_weight (`float`, *optional*, defaults to 5.0):
The weight for the mask loss.
dice_weight (`float`, *optional*, defaults to 5.0):
The weight for the dice loss.
train_num_points (`int`, *optional*, defaults to 12544):
Number of points used for sampling during loss calculation.
oversample_ratio (`float`, *optional*, defaults to 3.0):
Oversampling parameter used for calculating the number of sampled points.
importance_sample_ratio (`float`, *optional*, defaults to 0.75):
Ratio of points that are sampled via importance sampling.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1.0):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
If `True` [`Mask2FormerForUniversalSegmentationOutput`] will contain the auxiliary losses computed using
the logits from each decoder's stage.
feature_strides (`list[int]`, *optional*, defaults to `[4, 8, 16, 32]`):
Feature strides corresponding to features generated from backbone network.
output_auxiliary_logits (`bool`, *optional*):
Should the model output its `auxiliary_logits` or not.
Examples:
```python
>>> from transformers import Mask2FormerConfig, Mask2FormerModel
>>> # Initializing a Mask2Former facebook/mask2former-swin-small-coco-instance configuration
>>> configuration = Mask2FormerConfig()
>>> # Initializing a model (with random weights) from the facebook/mask2former-swin-small-coco-instance style configuration
>>> model = Mask2FormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, backbone_config: Optional[dict]=None, feature_size: int=256, mask_feature_size: int=256, hidden_dim: int=256, encoder_feedforward_dim: int=1024, activation_function: str='relu', encoder_layers: int=6, decoder_layers: int=10, num_attention_heads: int=8, dropout: float=0.0, dim_feedforward: int=2048, pre_norm: bool=False, enforce_input_projection: bool=False, common_stride: int=4, ignore_value: int=255, num_queries: int=100, no_object_weight: float=0.1, class_weight: float=2.0, mask_weight: float=5.0, dice_weight: float=5.0, train_num_points: int=12544, oversample_ratio: float=3.0, importance_sample_ratio: float=0.75, init_std: float=0.02, init_xavier_std: float=1.0, use_auxiliary_loss: bool=True, feature_strides: list[int]=[4, 8, 16, 32], output_auxiliary_logits: Optional[bool]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, **kwargs):
pass
@property
def sub_configs(self):
pass
@classmethod
def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
'''Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
Returns:
[`Mask2FormerConfig`]: An instance of a configuration object
'''
pass
| 6 | 2 | 59 | 3 | 53 | 4 | 3 | 0.9 | 1 | 6 | 0 | 0 | 1 | 33 | 2 | 2 | 226 | 17 | 110 | 77 | 71 | 99 | 50 | 41 | 47 | 4 | 1 | 1 | 5 |
3,625 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py | transformers.models.mask2former.convert_mask2former_original_pytorch_checkpoint_to_pytorch.Args |
from dataclasses import dataclass
@dataclass
class Args:
"""Fake command line arguments needed by mask2former/detectron implementation"""
config_file: str
|
@dataclass
class Args:
'''Fake command line arguments needed by mask2former/detectron implementation'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
3,626 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py | transformers.models.mask2former.convert_mask2former_original_pytorch_checkpoint_to_pytorch.OriginalMask2FormerCheckpointToOursConverter |
from torch import Tensor, nn
from collections.abc import Iterator
from pprint import pformat
from transformers import Mask2FormerConfig, Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor, Mask2FormerModel, SwinConfig
from pathlib import Path
# Type alias used by the converter methods below for plain mappings of parameter names to tensors.
StateDict = dict[str, Tensor]
class OriginalMask2FormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: Mask2FormerConfig):
self.original_model = original_model
self.config = config
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
for src_key, dst_key in renamed_keys:
dst_state_dict[dst_key] = src_state_dict.pop(src_key)
def replace_maskformer_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig):
dst_prefix: str = 'pixel_level_module.encoder'
src_prefix: str = 'backbone'
renamed_keys = [(f'{src_prefix}.patch_embed.proj.weight', f'{dst_prefix}.model.embeddings.patch_embeddings.projection.weight'), (f'{src_prefix}.patch_embed.proj.bias', f'{dst_prefix}.model.embeddings.patch_embeddings.projection.bias'), (f'{src_prefix}.patch_embed.norm.weight', f'{dst_prefix}.model.embeddings.norm.weight'), (f'{src_prefix}.patch_embed.norm.bias', f'{dst_prefix}.model.embeddings.norm.bias')]
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table')])
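# The original checkpoint stores the window-attention projections as a single fused `qkv` matrix; the
# slices below split it into equal thirds for the separate query/key/value weights and biases expected by
# the HF Swin implementation.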
src_att_weight = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight']
src_att_bias = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias']
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight'] = src_att_weight[:offset, :]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias'] = src_att_bias[:offset]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight'] = src_att_weight[offset:offset * 2, :]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias'] = src_att_bias[offset:offset * 2]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight'] = src_att_weight[-offset:, :]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias'] = src_att_bias[-offset:]
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight')
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias')
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index')])
if layer_idx < num_layers - 1:
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.downsample.reduction.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias')])
renamed_keys.extend([(f'{src_prefix}.norm{layer_idx}.weight', f'{dst_prefix}.hidden_states_norms.{layer_idx}.weight'), (f'{src_prefix}.norm{layer_idx}.bias', f'{dst_prefix}.hidden_states_norms.{layer_idx}.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig):
dst_prefix: str = 'pixel_level_module.encoder'
src_prefix: str = 'backbone'
renamed_keys = [(f'{src_prefix}.patch_embed.proj.weight', f'{dst_prefix}.embeddings.patch_embeddings.projection.weight'), (f'{src_prefix}.patch_embed.proj.bias', f'{dst_prefix}.embeddings.patch_embeddings.projection.bias'), (f'{src_prefix}.patch_embed.norm.weight', f'{dst_prefix}.embeddings.norm.weight'), (f'{src_prefix}.patch_embed.norm.bias', f'{dst_prefix}.embeddings.norm.bias')]
for layer_idx in range(len(config.backbone_config.depths)):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table')])
src_att_weight = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight']
src_att_bias = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias']
size = src_att_weight.shape[0]
offset = size // 3
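# Same qkv split as above: the fused projection is cut into query / key / value thirds.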
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight'] = src_att_weight[:offset, :]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias'] = src_att_bias[:offset]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight'] = src_att_weight[offset:offset * 2, :]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias'] = src_att_bias[offset:offset * 2]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight'] = src_att_weight[-offset:, :]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias'] = src_att_bias[-offset:]
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight')
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias')
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index')])
if layer_idx < 3:
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.downsample.reduction.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias')])
renamed_keys.extend([(f'{src_prefix}.norm{layer_idx}.weight', f'{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight'), (f'{src_prefix}.norm{layer_idx}.bias', f'{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'pixel_level_module.decoder'
src_prefix: str = 'sem_seg_head.pixel_decoder'
self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
self_attn_keys = []
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.attention_weights', f'{dst_prefix}.attention_weights'))
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.output_proj', f'{dst_prefix}.output_proj'))
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.sampling_offsets', f'{dst_prefix}.sampling_offsets'))
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.value_proj', f'{dst_prefix}.value_proj'))
return self_attn_keys
def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
encoder_keys = []
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear1', f'{dst_prefix}.fc1'))
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear2', f'{dst_prefix}.fc2'))
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm1', f'{dst_prefix}.self_attn_layer_norm'))
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm2', f'{dst_prefix}.final_layer_norm'))
encoder_keys.extend(rename_keys_for_self_attn(f'{src_prefix}.self_attn', f'{dst_prefix}.self_attn'))
return encoder_keys
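# The adapter_1 / layer_1 modules appear to be stored as (conv, norm) pairs on the destination side,
# hence the .0 / .1 indices in the renamed keys below.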
renamed_keys = [(f'{src_prefix}.adapter_1.weight', f'{dst_prefix}.adapter_1.0.weight'), (f'{src_prefix}.adapter_1.norm.weight', f'{dst_prefix}.adapter_1.1.weight'), (f'{src_prefix}.adapter_1.norm.bias', f'{dst_prefix}.adapter_1.1.bias')]
renamed_keys.extend([(f'{src_prefix}.layer_1.weight', f'{dst_prefix}.layer_1.0.weight'), (f'{src_prefix}.layer_1.norm.weight', f'{dst_prefix}.layer_1.1.weight'), (f'{src_prefix}.layer_1.norm.bias', f'{dst_prefix}.layer_1.1.bias')])
for i in range(3):
for j in range(2):
renamed_keys.extend([(f'{src_prefix}.input_proj.{i}.{j}.weight', f'{dst_prefix}.input_projections.{i}.{j}.weight'), (f'{src_prefix}.input_proj.{i}.{j}.bias', f'{dst_prefix}.input_projections.{i}.{j}.bias')])
renamed_keys.extend([(f'{src_prefix}.transformer.level_embed', f'{dst_prefix}.level_embed')])
for layer_idx in range(self.config.encoder_layers):
renamed_keys.extend(rename_keys_for_encoder_layer(f'{src_prefix}.transformer.encoder.layers.{layer_idx}', f'{dst_prefix}.encoder.layers.{layer_idx}'))
renamed_keys.extend([(f'{src_prefix}.mask_features.weight', f'{dst_prefix}.mask_projection.weight'), (f'{src_prefix}.mask_features.bias', f'{dst_prefix}.mask_projection.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def rename_keys_in_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder'
src_prefix: str = 'sem_seg_head.predictor'
rename_keys = []
for i in range(self.config.decoder_layers - 1):
rename_keys.append((f'{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.weight', f'{dst_prefix}.layers.{i}.self_attn.out_proj.weight'))
rename_keys.append((f'{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.bias', f'{dst_prefix}.layers.{i}.self_attn.out_proj.bias'))
rename_keys.append((f'{src_prefix}.transformer_self_attention_layers.{i}.norm.weight', f'{dst_prefix}.layers.{i}.self_attn_layer_norm.weight'))
rename_keys.append((f'{src_prefix}.transformer_self_attention_layers.{i}.norm.bias', f'{dst_prefix}.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_weight', f'{dst_prefix}.layers.{i}.cross_attn.in_proj_weight'))
rename_keys.append((f'{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_bias', f'{dst_prefix}.layers.{i}.cross_attn.in_proj_bias'))
rename_keys.append((f'{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.weight', f'{dst_prefix}.layers.{i}.cross_attn.out_proj.weight'))
rename_keys.append((f'{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.bias', f'{dst_prefix}.layers.{i}.cross_attn.out_proj.bias'))
rename_keys.append((f'{src_prefix}.transformer_cross_attention_layers.{i}.norm.weight', f'{dst_prefix}.layers.{i}.cross_attn_layer_norm.weight'))
rename_keys.append((f'{src_prefix}.transformer_cross_attention_layers.{i}.norm.bias', f'{dst_prefix}.layers.{i}.cross_attn_layer_norm.bias'))
rename_keys.append((f'{src_prefix}.transformer_ffn_layers.{i}.linear1.weight', f'{dst_prefix}.layers.{i}.fc1.weight'))
rename_keys.append((f'{src_prefix}.transformer_ffn_layers.{i}.linear1.bias', f'{dst_prefix}.layers.{i}.fc1.bias'))
rename_keys.append((f'{src_prefix}.transformer_ffn_layers.{i}.linear2.weight', f'{dst_prefix}.layers.{i}.fc2.weight'))
rename_keys.append((f'{src_prefix}.transformer_ffn_layers.{i}.linear2.bias', f'{dst_prefix}.layers.{i}.fc2.bias'))
rename_keys.append((f'{src_prefix}.transformer_ffn_layers.{i}.norm.weight', f'{dst_prefix}.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'{src_prefix}.transformer_ffn_layers.{i}.norm.bias', f'{dst_prefix}.layers.{i}.final_layer_norm.bias'))
return rename_keys
def replace_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder'
src_prefix: str = 'sem_seg_head.predictor'
renamed_keys = self.rename_keys_in_masked_attention_decoder(dst_state_dict, src_state_dict)
renamed_keys.extend([(f'{src_prefix}.decoder_norm.weight', f'{dst_prefix}.layernorm.weight'), (f'{src_prefix}.decoder_norm.bias', f'{dst_prefix}.layernorm.bias')])
mlp_len = 3
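# The mask embedding MLP has three linear layers; each destination layer wraps its Linear at index 0,
# hence the `.{i}.0.weight` / `.{i}.0.bias` keys below.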
for i in range(mlp_len):
renamed_keys.extend([(f'{src_prefix}.mask_embed.layers.{i}.weight', f'{dst_prefix}.mask_predictor.mask_embedder.{i}.0.weight'), (f'{src_prefix}.mask_embed.layers.{i}.bias', f'{dst_prefix}.mask_predictor.mask_embedder.{i}.0.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder.layers'
src_prefix: str = 'sem_seg_head.predictor'
for i in range(self.config.decoder_layers - 1):
in_proj_weight = src_state_dict.pop(f'{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight')
in_proj_bias = src_state_dict.pop(f'{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias')
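# The original self-attention keeps a fused in_proj (nn.MultiheadAttention style); split it into
# q / k / v projections of hidden size 256 to match the separate projection layers of the HF decoder.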
dst_state_dict[f'{dst_prefix}.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module'
src_prefix: str = 'sem_seg_head.predictor'
self.replace_masked_attention_decoder(dst_state_dict, src_state_dict)
renamed_keys = [(f'{src_prefix}.query_embed.weight', f'{dst_prefix}.queries_embedder.weight'), (f'{src_prefix}.query_feat.weight', f'{dst_prefix}.queries_features.weight'), (f'{src_prefix}.level_embed.weight', f'{dst_prefix}.level_embed.weight')]
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)
def replace_universal_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = ''
src_prefix: str = 'sem_seg_head.predictor'
renamed_keys = [(f'{src_prefix}.class_embed.weight', f'{dst_prefix}class_predictor.weight'), (f'{src_prefix}.class_embed.bias', f'{dst_prefix}class_predictor.bias')]
logger.info(f'Replacing keys {pformat(renamed_keys)}')
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def convert(self, mask2former: Mask2FormerModel) -> Mask2FormerModel:
dst_state_dict = TrackedStateDict(mask2former.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_pixel_module(dst_state_dict, src_state_dict)
self.replace_transformer_module(dst_state_dict, src_state_dict)
logger.info(f'Missed keys are {pformat(dst_state_dict.diff())}')
logger.info(f'Not copied keys are {pformat(src_state_dict.keys())}')
logger.info('🙌 Done')
state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track}
mask2former.load_state_dict(state_dict)
return mask2former
def convert_universal_segmentation(self, mask2former: Mask2FormerForUniversalSegmentation) -> Mask2FormerForUniversalSegmentation:
dst_state_dict = TrackedStateDict(mask2former.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_universal_segmentation_module(dst_state_dict, src_state_dict)
state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track}
mask2former.load_state_dict(state_dict)
return mask2former
@staticmethod
def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[object, Path, Path]]:
checkpoints: list[Path] = checkpoints_dir.glob('**/*.pkl')
for checkpoint in checkpoints:
logger.info(f'💪 Converting {checkpoint.stem}')
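# Dataset name, task type and config file name are recovered from the checkpoint's parent directories,
# e.g. <dataset>/<task>/<config>/model.pkl (illustrative layout).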
dataset_name = checkpoint.parents[2].stem
if dataset_name == 'ade':
dataset_name = dataset_name.replace('ade', 'ade20k')
segmentation_task = checkpoint.parents[1].stem
config_file_name = f'{checkpoint.parents[0].stem}.yaml'
config: Path = config_dir / dataset_name / segmentation_task / 'swin' / config_file_name
yield (config, checkpoint)
|
class OriginalMask2FormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: Mask2FormerConfig):
pass
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_maskformer_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig):
pass
def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig):
pass
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
pass
def rename_keys_in_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_universal_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def convert(self, mask2former: Mask2FormerModel) -> Mask2FormerModel:
pass
def convert_universal_segmentation(self, mask2former: Mask2FormerForUniversalSegmentation) -> Mask2FormerForUniversalSegmentation:
pass
@staticmethod
def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[object, Path, Path]]:
pass
| 18 | 0 | 40 | 4 | 35 | 2 | 2 | 0.06 | 0 | 8 | 1 | 0 | 12 | 2 | 13 | 13 | 633 | 73 | 531 | 84 | 509 | 31 | 178 | 79 | 161 | 4 | 0 | 2 | 31 |
3,627
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py
|
transformers.models.mask2former.convert_mask2former_original_pytorch_checkpoint_to_pytorch.OriginalMask2FormerConfigToImageProcessorConverter
|
from transformers import Mask2FormerConfig, Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor, Mask2FormerModel, SwinConfig
import torch
class OriginalMask2FormerConfigToImageProcessorConverter:
def __call__(self, original_config: object) -> Mask2FormerImageProcessor:
model = original_config.MODEL
model_input = original_config.INPUT
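# PIXEL_MEAN / PIXEL_STD in the original config are expressed in the 0-255 range, so they are divided
# by 255 before being handed to the image processor.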
return Mask2FormerImageProcessor(image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=model.SEM_SEG_HEAD.IGNORE_VALUE, size_divisibility=32)
|
class OriginalMask2FormerConfigToImageProcessorConverter:
def __call__(self, original_config: object) -> Mask2FormerImageProcessor:
pass
| 2 | 0 | 13 | 1 | 12 | 0 | 1 | 0 | 0 | 2 | 0 | 0 | 1 | 0 | 1 | 1 | 14 | 1 | 13 | 4 | 11 | 0 | 5 | 4 | 3 | 1 | 0 | 0 | 1 |
3,628
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py
|
transformers.models.mask2former.convert_mask2former_original_pytorch_checkpoint_to_pytorch.OriginalMask2FormerConfigToOursConverter
|
import json
from huggingface_hub import hf_hub_download
from transformers import Mask2FormerConfig, Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor, Mask2FormerModel, SwinConfig
class OriginalMask2FormerConfigToOursConverter:
def __call__(self, original_config: object) -> Mask2FormerConfig:
model = original_config.MODEL
repo_id = 'huggingface/label-files'
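# The number of semantic classes in the original config identifies which id2label mapping file to
# download from the `huggingface/label-files` dataset repo below.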
if model.SEM_SEG_HEAD.NUM_CLASSES == 847:
filename = 'mask2former-ade20k-full-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 150:
filename = 'ade20k-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 80:
filename = 'coco-detection-mmdet-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 171:
filename = 'mask2former-coco-stuff-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 133:
filename = 'coco-panoptic-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 19:
filename = 'cityscapes-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 8:
filename = 'cityscapes-instance-id2label.json'
elif model.SEM_SEG_HEAD.NUM_CLASSES == 65:
filename = 'mapillary-vistas-id2label.json'
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {label: idx for idx, label in id2label.items()}
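# The Swin variant (tiny / base-like / large) is inferred from the embedding dimension of the original backbone.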
if model.SWIN.EMBED_DIM == 96:
backbone_config = SwinConfig.from_pretrained('microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
elif model.SWIN.EMBED_DIM == 128:
backbone_config = SwinConfig(embed_dim=128, window_size=12, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), out_features=['stage1', 'stage2', 'stage3', 'stage4'])
elif model.SWIN.EMBED_DIM == 192:
backbone_config = SwinConfig.from_pretrained('microsoft/swin-large-patch4-window12-384', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
else:
raise ValueError(f'embed dim {model.SWIN.EMBED_DIM} not supported for Swin!')
backbone_config.drop_path_rate = model.SWIN.DROP_PATH_RATE
backbone_config.attention_probs_dropout_prob = model.SWIN.ATTN_DROP_RATE
backbone_config.depths = model.SWIN.DEPTHS
config: Mask2FormerConfig = Mask2FormerConfig(ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, num_queries=model.MASK_FORMER.NUM_OBJECT_QUERIES, no_object_weight=model.MASK_FORMER.NO_OBJECT_WEIGHT, class_weight=model.MASK_FORMER.CLASS_WEIGHT, mask_weight=model.MASK_FORMER.MASK_WEIGHT, dice_weight=model.MASK_FORMER.DICE_WEIGHT, train_num_points=model.MASK_FORMER.TRAIN_NUM_POINTS, oversample_ratio=model.MASK_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=model.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO, init_std=0.02, init_xavier_std=1.0, use_auxiliary_loss=model.MASK_FORMER.DEEP_SUPERVISION, feature_strides=[4, 8, 16, 32], backbone_config=backbone_config, id2label=id2label, label2id=label2id, feature_size=model.SEM_SEG_HEAD.CONVS_DIM, mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM, hidden_dim=model.MASK_FORMER.HIDDEN_DIM, encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS, encoder_feedforward_dim=1024, decoder_layers=model.MASK_FORMER.DEC_LAYERS, num_attention_heads=model.MASK_FORMER.NHEADS, dropout=model.MASK_FORMER.DROPOUT, dim_feedforward=model.MASK_FORMER.DIM_FEEDFORWARD, pre_norm=model.MASK_FORMER.PRE_NORM, enforce_input_proj=model.MASK_FORMER.ENFORCE_INPUT_PROJ, common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE)
return config
|
class OriginalMask2FormerConfigToOursConverter:
def __call__(self, original_config: object) -> Mask2FormerConfig:
pass
| 2 | 0 | 81 | 6 | 75 | 0 | 12 | 0 | 0 | 5 | 0 | 0 | 1 | 0 | 1 | 1 | 82 | 6 | 76 | 9 | 74 | 0 | 26 | 9 | 24 | 12 | 0 | 1 | 12 |
3,629
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/image_processing_mask2former.py
|
transformers.models.mask2former.image_processing_mask2former.Mask2FormerImageProcessor
|
from ...image_transforms import PaddingMode, get_resize_output_image_size, pad, rescale, resize, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
import numpy as np
from collections.abc import Iterable
from ...utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging
from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from typing import Any, Optional, Union
class Mask2FormerImageProcessor(BaseImageProcessor):
"""
Constructs a Mask2Former image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e., if `height > width`, then the image will be rescaled to `(size *
height / width, size)`.
size_divisor (`int`, *optional*, defaults to 32):
Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in
Swin Transformer.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1 / 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
model_input_names = ['pixel_values', 'pixel_mask']
@filter_out_non_signature_kwargs(extra=['max_size', *INIT_SERVICE_KWARGS])
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, num_labels: Optional[int]=None, pad_size: Optional[dict[str, int]]=None, **kwargs):
super().__init__(**kwargs)
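# `max_size` is a legacy keyword kept for backward compatibility; it seeds the default longest edge below.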
self._max_size = kwargs.pop('max_size', 1333)
size = size if size is not None else {'shortest_edge': 800, 'longest_edge': self._max_size}
size = get_size_dict(size, max_size=self._max_size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.size_divisor = size_divisor
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.ignore_index = ignore_index
self.do_reduce_labels = do_reduce_labels
self.num_labels = num_labels
self.pad_size = pad_size
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
"""
image_processor_dict = super().to_dict()
image_processor_dict.pop('_max_size', None)
return image_processor_dict
def resize(self, image: np.ndarray, size: dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
The size of the output image.
size_divisor (`int`, *optional*, defaults to 0):
If `size_divisor` is given, the output image size will be divisible by the number.
resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
max_size = kwargs.pop('max_size', None)
size = get_size_dict(size, max_size=max_size, default_to_square=False)
if 'shortest_edge' in size and 'longest_edge' in size:
size, max_size = (size['shortest_edge'], size['longest_edge'])
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
max_size = None
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.")
size = get_mask2former_resize_output_image_size(image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, input_data_format=input_data_format)
image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
return image
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
return convert_segmentation_map_to_binary_masks(segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)
def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature:
return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_resize:
image = self.resize(image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: int=0, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map)
segmentation_map = self._preprocess(image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, input_data_format=input_data_format)
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False, max_size=self._max_size)
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
pad_size = self.pad_size if pad_size is None else pad_size
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if segmentation_maps is not None and (not valid_images(segmentation_maps)):
raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if segmentation_maps is not None and len(images) != len(segmentation_maps):
raise ValueError('Images and segmentation maps must have the same length.')
images = [self._preprocess_image(image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for image in images]
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_mask(segmentation_map, do_resize, size, size_divisor, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
encoded_inputs = self.encode_inputs(images, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, input_data_format=data_format, pad_size=pad_size)
return encoded_inputs
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
pad_size = pad_size if pad_size is not None else self.pad_size
if pad_size is not None:
padded_size = (pad_size['height'], pad_size['width'])
else:
padded_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [self._pad_image(image, padded_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) for image in images]
data = {'pixel_values': padded_images}
if return_pixel_mask:
masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]
data['pixel_mask'] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
def encode_inputs(self, pixel_values_list: list[ImageInput], segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None):
"""
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
Mask2Former addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
Images are padded up to the largest image in a batch and a corresponding pixel mask is created that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` is `class_labels[i][j]`.
"""
ignore_index = self.ignore_index if ignore_index is None else ignore_index
do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels
pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(pixel_values_list[0])
encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format, pad_size=pad_size)
if segmentation_maps is not None:
mask_labels = []
class_labels = []
pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format)
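# Each image's binary masks are padded to the batch's maximum height/width before being concatenated.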
for idx, segmentation_map in enumerate(segmentation_maps):
segmentation_map = to_numpy_array(segmentation_map)
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
masks, classes = self.convert_segmentation_map_to_binary_masks(segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)
if masks.shape[0] > 0:
masks = [mask[None, ...] for mask in masks]
masks = [self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks]
masks = np.concatenate(masks, axis=0)
else:
masks = np.zeros((0, *pad_size), dtype=np.float32)
mask_labels.append(torch.from_numpy(masks))
class_labels.append(torch.from_numpy(classes))
encoded_inputs['mask_labels'] = mask_labels
encoded_inputs['class_labels'] = class_labels
return encoded_inputs
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None) -> 'torch.Tensor':
"""
Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`Mask2FormerForUniversalSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` corresponds to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
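# Mask logits are upsampled to a fixed 384x384 resolution before being combined with the class scores.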
masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=(384, 384), mode='bilinear', align_corners=False)
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid()
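# Per-class probability maps: weight every query's mask by its class probabilities and sum over queries
# (b = batch, q = queries, c = classes, h/w = spatial dims).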
segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False, return_binary_maps: Optional[bool]=False) -> list[dict]:
"""
Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions.
Only supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
to `True` to get the correct segmentation result.
Args:
outputs ([`Mask2FormerForUniversalSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
return_binary_maps (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
(one per detected instance).
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
Set to `None` if no mask is found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if return_coco_annotation and return_binary_maps:
raise ValueError('return_coco_annotation and return_binary_maps can not be both set to True.')
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=(384, 384), mode='bilinear', align_corners=False)
device = masks_queries_logits.device
num_classes = class_queries_logits.shape[-1] - 1
num_queries = class_queries_logits.shape[-2]
results: list[dict[str, TensorType]] = []
for i in range(class_queries_logits.shape[0]):
mask_pred = masks_queries_logits[i]
mask_cls = class_queries_logits[i]
scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]
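# topk was taken over the flattened (query, class) scores, so integer division by num_classes recovers
# the originating query index.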
topk_indices = torch.div(topk_indices, num_classes, rounding_mode='floor')
mask_pred = mask_pred[topk_indices]
pred_masks = (mask_pred > 0).float()
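# Mask quality score: mean sigmoid probability inside the predicted (thresholded-at-0) mask region.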
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (pred_masks.flatten(1).sum(1) + 1e-06)
pred_scores = scores_per_image * mask_scores_per_image
pred_classes = labels_per_image
segmentation = torch.zeros((384, 384)) - 1
if target_sizes is not None:
segmentation = torch.zeros(target_sizes[i]) - 1
pred_masks = torch.nn.functional.interpolate(pred_masks.unsqueeze(0), size=target_sizes[i], mode='nearest')[0]
instance_maps, segments = ([], [])
current_segment_id = 0
for j in range(num_queries):
score = pred_scores[j].item()
if not torch.all(pred_masks[j] == 0) and score >= threshold:
segmentation[pred_masks[j] == 1] = current_segment_id
segments.append({'id': current_segment_id, 'label_id': pred_classes[j].item(), 'was_fused': False, 'score': round(score, 6)})
current_segment_id += 1
instance_maps.append(pred_masks[j])
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
if return_binary_maps and len(instance_maps) != 0:
segmentation = torch.stack(instance_maps, dim=0)
results.append({'segmentation': segmentation, 'segments_info': segments})
return results
def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]:
"""
Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`Mask2FormerForUniversalSegmentationOutput`]):
The outputs from [`Mask2FormerForUniversalSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this set will have all their instances fused together. For instance, we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning('`label_ids_to_fuse` unset. No instance will be fused.')
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=(384, 384), mode='bilinear', align_corners=False)
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
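# Per-query mask probabilities (sigmoid) and the best class score / label (softmax + max) drive the
# panoptic segment assignment below.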
mask_probs = masks_queries_logits.sigmoid()
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels)
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({'segmentation': segmentation, 'segments_info': []})
continue
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size)
results.append({'segmentation': segmentation, 'segments_info': segments})
return results
|
class Mask2FormerImageProcessor(BaseImageProcessor):
'''
Constructs a Mask2Former image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e., if `height > width`, then the image will be rescaled to `(size *
height / width, size)`.
size_divisor (`int`, *optional*, defaults to 32):
Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in
Swin Transformer.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1/255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
'''
@filter_out_non_signature_kwargs(extra=['max_size', *INIT_SERVICE_KWARGS])
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, num_labels: Optional[int]=None, pad_size: Optional[dict[str, int]]=None, **kwargs):
pass
def to_dict(self) -> dict[str, Any]:
'''
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
'''
pass
def resize(self, image: np.ndarray, size: dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
The size of the output image.
size_divisor (`int`, *optional*, defaults to 0):
If `size_divisor` is given, the output image size will be divisible by the number.
resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
'''
pass
def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):
pass
def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature:
pass
def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: int=0, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single mask.'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
pass
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image with zeros to the given size.
'''
pass
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
'''
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
images (`list[np.ndarray]`):
Images to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
'''
pass
def encode_inputs(self, pixel_values_list: list[ImageInput], segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None):
'''
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
Mask2Former addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
Images are padded up to the largest image in the batch and a corresponding pixel mask is created.
The returned pixel mask is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when images are padded or if `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` is `class_labels[i][j]`.
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None) -> 'torch.Tensor':
'''
Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`Mask2FormerForUniversalSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
'''
pass
def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False, return_binary_maps: Optional[bool]=False) -> list[dict]:
'''
Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions.
Only supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
to `True` to get the correct segmentation result.
Args:
outputs ([`Mask2FormerForUniversalSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
return_binary_maps (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
(one per detected instance).
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
Set to `None` if no mask is found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
'''
pass
def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]:
'''
Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`Mask2FormerForUniversalSegmentationOutput`]):
The outputs from [`Mask2FormerForUniversalSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
'''
pass
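# Hedged usage sketch (illustrative addition, not part of the skeleton above): preparing an image
# together with an instance segmentation map, mirroring the mask-classification inputs described in
# `encode_inputs`. The checkpoint name is one already used in this file's docstrings; the image and
# instance map are synthetic.
import numpy as np
from transformers import Mask2FormerImageProcessor

processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
instance_map = np.zeros((480, 640), dtype=np.int64)
instance_map[100:200, 100:200] = 1  # a single synthetic object instance
inputs = processor(
    images=image,
    segmentation_maps=instance_map,
    instance_id_to_semantic_id={0: 0, 1: 17},  # instance id -> semantic class id
    return_tensors="pt",
)
# inputs carries pixel_values and pixel_mask, plus mask_labels (binary masks) and class_labels.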
| 19
| 12
| 48
| 4
| 30
| 13
| 4
| 0.52
| 1
| 14
| 3
| 0
| 16
| 13
| 17
| 37
| 888
| 92
| 527
| 234
| 369
| 275
| 237
| 98
| 219
| 18
| 3
| 3
| 76
|
3,630
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerAttention
|
from torch import Tensor, nn
from typing import Optional, Union
import torch
class Mask2FormerAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, key_value_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None
position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None
key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None
key_value_position_embeddings = key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
if key_value_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings)
query_states = self.q_proj(hidden_states) * self.scaling
if is_cross_attention:
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(f'Attention mask should be of size {(target_len, batch_size * self.num_heads, source_len)}, but is {attention_mask.size()}')
attn_weights += attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output).permute(1, 0, 2)
return (attn_output, attn_weights_reshaped)
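# Hedged sketch (illustrative addition): exercising the attention block with random tensors.
# Note that forward() permutes its inputs, so tensors are passed as
# (sequence_length, batch_size, embed_dim), matching the permute(1, 0, 2) calls above.
attention = Mask2FormerAttention(embed_dim=256, num_heads=8, dropout=0.0)
queries = torch.randn(100, 2, 256)    # (seq_len, batch, embed_dim)
query_pos = torch.randn(100, 2, 256)  # positional embeddings added to queries and keys
attn_output, attn_weights = attention(
    hidden_states=queries, position_embeddings=query_pos, output_attentions=True
)
# attn_output: (100, 2, 256); attn_weights: (2, 8, 100, 100)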
|
class Mask2FormerAttention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, key_value_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 5
| 2
| 31
| 5
| 23
| 3
| 5
| 0.18
| 1
| 6
| 0
| 0
| 4
| 9
| 4
| 14
| 134
| 23
| 94
| 42
| 74
| 17
| 63
| 27
| 58
| 13
| 1
| 2
| 18
|
3,631
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerForUniversalSegmentation
|
from ...utils import auto_docstring, is_accelerate_available, logging
from typing import Optional, Union
from .configuration_mask2former import Mask2FormerConfig
from torch import Tensor, nn
import torch
@auto_docstring(custom_intro='\n The Mask2Former Model with heads on top for instance/semantic/panoptic segmentation.\n ')
class Mask2FormerForUniversalSegmentation(Mask2FormerPreTrainedModel):
main_input_name = 'pixel_values'
def __init__(self, config: Mask2FormerConfig):
super().__init__(config)
self.model = Mask2FormerModel(config)
self.weight_dict: dict[str, float] = {'loss_cross_entropy': config.class_weight, 'loss_mask': config.mask_weight, 'loss_dice': config.dice_weight}
self.class_predictor = nn.Linear(config.hidden_dim, config.num_labels + 1)
self.criterion = Mask2FormerLoss(config=config, weight_dict=self.weight_dict)
self.post_init()
def get_loss_dict(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, auxiliary_predictions: dict[str, Tensor]) -> dict[str, Tensor]:
loss_dict: dict[str, Tensor] = self.criterion(masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, mask_labels=mask_labels, class_labels=class_labels, auxiliary_predictions=auxiliary_predictions)
for key, weight in self.weight_dict.items():
for loss_key, loss in loss_dict.items():
if key in loss_key:
loss *= weight
return loss_dict
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
return sum(loss_dict.values())
def get_auxiliary_logits(self, classes: torch.Tensor, output_masks: torch.Tensor):
auxiliary_logits: list[dict[str, Tensor]] = []
for aux_binary_masks, aux_classes in zip(output_masks[:-1], classes[:-1]):
auxiliary_logits.append({'masks_queries_logits': aux_binary_masks, 'class_queries_logits': aux_classes})
return auxiliary_logits
@auto_docstring
def forward(self, pixel_values: Tensor, mask_labels: Optional[list[Tensor]]=None, class_labels: Optional[list[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_auxiliary_logits: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Mask2FormerForUniversalSegmentationOutput:
"""
mask_labels (`list[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
List of target class labels of shape `(num_labels,)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
output_auxiliary_logits (`bool`, *optional*):
Whether or not to output auxiliary logits.
Examples:
Instance segmentation example:
```python
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # Load Mask2Former trained on COCO instance segmentation dataset
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(
... "facebook/mask2former-swin-small-coco-instance"
... )
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # Perform post-processing to get instance segmentation map
>>> pred_instance_map = image_processor.post_process_instance_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> print(pred_instance_map.shape)
torch.Size([480, 640])
```
Semantic segmentation example:
```python
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # Load Mask2Former trained on ADE20k semantic segmentation dataset
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-ade-semantic")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-ade-semantic")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # Perform post-processing to get semantic segmentation map
>>> pred_semantic_map = image_processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> print(pred_semantic_map.shape)
torch.Size([512, 683])
```
Panoptic segmentation example:
```python
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # Load Mask2Former trained on CityScapes panoptic segmentation dataset
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-cityscapes-panoptic")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(
... "facebook/mask2former-swin-small-cityscapes-panoptic"
... )
>>> url = "https://cdn-media.huggingface.co/Inference-API/Sample-results-on-the-Cityscapes-dataset-The-above-images-show-how-our-method-can-handle.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # Perform post-processing to get panoptic segmentation map
>>> pred_panoptic_map = image_processor.post_process_panoptic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> print(pred_panoptic_map.shape)
torch.Size([338, 676])
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(pixel_values=pixel_values, pixel_mask=pixel_mask, output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, output_attentions=output_attentions, return_dict=True)
loss, loss_dict, auxiliary_logits = (None, None, None)
class_queries_logits = ()
for decoder_output in outputs.transformer_decoder_intermediate_states:
class_prediction = self.class_predictor(decoder_output.transpose(0, 1))
class_queries_logits += (class_prediction,)
masks_queries_logits = outputs.masks_queries_logits
auxiliary_logits = self.get_auxiliary_logits(class_queries_logits, masks_queries_logits)
if mask_labels is not None and class_labels is not None:
loss_dict = self.get_loss_dict(masks_queries_logits=masks_queries_logits[-1], class_queries_logits=class_queries_logits[-1], mask_labels=mask_labels, class_labels=class_labels, auxiliary_predictions=auxiliary_logits)
loss = self.get_loss(loss_dict)
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
if output_hidden_states:
encoder_hidden_states = outputs.encoder_hidden_states
pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states
transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states
output_auxiliary_logits = self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
if not output_auxiliary_logits:
auxiliary_logits = None
output = Mask2FormerForUniversalSegmentationOutput(loss=loss, class_queries_logits=class_queries_logits[-1], masks_queries_logits=masks_queries_logits[-1], auxiliary_logits=auxiliary_logits, encoder_last_hidden_state=outputs.encoder_last_hidden_state, pixel_decoder_last_hidden_state=outputs.pixel_decoder_last_hidden_state, transformer_decoder_last_hidden_state=outputs.transformer_decoder_last_hidden_state, encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, attentions=outputs.attentions)
if not return_dict:
output = tuple((v for v in output.values() if v is not None))
if loss is not None:
output = (loss,) + output
return output
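# Hedged sketch (illustrative addition): computing the training loss with dummy targets on a
# randomly initialised model. The config values are illustrative only; scipy is required by the
# Hungarian matcher inside the loss.
config = Mask2FormerConfig(num_labels=10)
model = Mask2FormerForUniversalSegmentation(config)
pixel_values = torch.randn(1, 3, 256, 256)
mask_labels = [torch.randint(0, 2, (2, 256, 256)).float()]  # two ground-truth binary masks
class_labels = [torch.tensor([1, 4])]
outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)
# outputs.loss combines the weighted cross-entropy, mask and dice losses defined in Mask2FormerLoss.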
|
@auto_docstring(custom_intro='\n The Mask2Former Model with heads on top for instance/semantic/panoptic segmentation.\n ')
class Mask2FormerForUniversalSegmentation(Mask2FormerPreTrainedModel):
def __init__(self, config: Mask2FormerConfig):
pass
def get_loss_dict(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, auxiliary_predictions: dict[str, Tensor]) -> dict[str, Tensor]:
pass
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
pass
def get_auxiliary_logits(self, classes: torch.Tensor, output_masks: torch.Tensor):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, mask_labels: Optional[list[Tensor]]=None, class_labels: Optional[list[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_auxiliary_logits: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Mask2FormerForUniversalSegmentationOutput:
'''
mask_labels (`list[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
List of target class labels of shape `(num_labels,)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
output_auxiliary_logits (`bool`, *optional*):
Whether or not to output auxiliary logits.
Examples:
Instance segmentation example:
```python
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # Load Mask2Former trained on COCO instance segmentation dataset
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(
... "facebook/mask2former-swin-small-coco-instance"
... )
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # Perform post-processing to get instance segmentation map
>>> pred_instance_map = image_processor.post_process_instance_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> print(pred_instance_map.shape)
torch.Size([480, 640])
```
Semantic segmentation example:
```python
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # Load Mask2Former trained on ADE20k semantic segmentation dataset
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-ade-semantic")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-ade-semantic")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # Perform post-processing to get semantic segmentation map
>>> pred_semantic_map = image_processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> print(pred_semantic_map.shape)
torch.Size([512, 683])
```
Panoptic segmentation example:
```python
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # Load Mask2Former trained on CityScapes panoptic segmentation dataset
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-cityscapes-panoptic")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(
... "facebook/mask2former-swin-small-cityscapes-panoptic"
... )
>>> url = "https://cdn-media.huggingface.co/Inference-API/Sample-results-on-the-Cityscapes-dataset-The-above-images-show-how-our-method-can-handle.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # Perform post-processing to get panoptic segmentation map
>>> pred_panoptic_map = image_processor.post_process_panoptic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> print(pred_panoptic_map.shape)
torch.Size([338, 676])
```
'''
pass
| 8
| 1
| 48
| 8
| 21
| 18
| 4
| 0.83
| 1
| 11
| 4
| 0
| 5
| 4
| 5
| 6
| 248
| 45
| 111
| 44
| 86
| 92
| 54
| 26
| 48
| 11
| 2
| 3
| 19
|
3,632
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerForUniversalSegmentationOutput
|
from ...utils import auto_docstring, is_accelerate_available, logging
from ...file_utils import ModelOutput, is_scipy_available, requires_backends
from typing import Optional, Union
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`Mask2FormerForUniversalSegmentationOutput`].\n\n This output can be directly passed to [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or\n [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or\n [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`] to compute final segmentation maps. Please, see\n [`~Mask2FormerImageProcessor] for details regarding usage.\n ')
class Mask2FormerForUniversalSegmentationOutput(ModelOutput):
"""
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_logits (`list[dict[str, torch.FloatTensor]]`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model.
transformer_decoder_last_hidden_state (`tuple(torch.FloatTensor)`):
Final output of the transformer decoder `(batch_size, sequence_length, hidden_size)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
"""
loss: Optional[torch.FloatTensor] = None
class_queries_logits: Optional[torch.FloatTensor] = None
masks_queries_logits: Optional[torch.FloatTensor] = None
auxiliary_logits: Optional[list[dict[str, torch.FloatTensor]]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
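# Hedged sketch (illustrative addition): the output behaves like any ModelOutput dataclass and can
# be built directly with dummy tensors; the shapes below are illustrative and follow the field
# documentation above.
dummy = Mask2FormerForUniversalSegmentationOutput(
    class_queries_logits=torch.randn(1, 100, 134),     # (batch, num_queries, num_labels + 1)
    masks_queries_logits=torch.randn(1, 100, 96, 96),  # (batch, num_queries, height, width)
)
assert dummy["class_queries_logits"] is dummy.class_queries_logits  # dict-style and attribute access agree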
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`Mask2FormerForUniversalSegmentationOutput`].\n\n This output can be directly passed to [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or\n [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or\n [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`] to compute final segmentation maps. Please, see\n [`~Mask2FormerImageProcessor] for details regarding usage.\n ')
class Mask2FormerForUniversalSegmentationOutput(ModelOutput):
'''
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_logits (`list[dict[str, torch.FloatTensor]]`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model.
transformer_decoder_last_hidden_state (`tuple(torch.FloatTensor)`):
Final output of the transformer decoder `(batch_size, sequence_length, hidden_size)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 3
| 12
| 12
| 11
| 39
| 12
| 12
| 11
| 0
| 1
| 0
| 0
|
3,633
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerHungarianMatcher
|
from torch import Tensor, nn
import numpy as np
import torch
class Mask2FormerHungarianMatcher(nn.Module):
"""This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float=1.0, cost_mask: float=1.0, cost_dice: float=1.0, num_points: int=12544):
"""Creates the matcher
Params:
cost_class (`float`, *optional*, defaults to 1.0):
Relative weight of the classification error in the matching cost.
cost_mask (`float`, *optional*, defaults to 1.0):
This is the relative weight of the focal loss of the binary mask in the matching cost.
cost_dice (`float`, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost.
num_points (`int`, *optional*, defaults to 12544):
No. of points to sample on which the mask loss will be calculated. The same set of K points are
uniformly sampled for all prediction and ground truth masks to construct the cost matrix for bipartite
matching.
"""
super().__init__()
if cost_class == 0 and cost_mask == 0 and (cost_dice == 0):
raise ValueError("All costs can't be 0")
self.num_points = num_points
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
@torch.no_grad()
def forward(self, masks_queries_logits: torch.Tensor, class_queries_logits: torch.Tensor, mask_labels: torch.Tensor, class_labels: torch.Tensor) -> list[tuple[Tensor]]:
"""
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the
target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target masks.
Returns:
matched_indices (`list[tuple[Tensor]]`): A list of size batch_size, containing tuples of (index_i, index_j)
where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes).
"""
indices: list[tuple[np.array]] = []
batch_size = masks_queries_logits.shape[0]
for i in range(batch_size):
pred_probs = class_queries_logits[i].softmax(-1)
pred_mask = masks_queries_logits[i]
cost_class = -pred_probs[:, class_labels[i]]
target_mask = mask_labels[i].to(pred_mask)
target_mask = target_mask[:, None]
pred_mask = pred_mask[:, None]
point_coordinates = torch.rand(1, self.num_points, 2, device=pred_mask.device)
target_coordinates = point_coordinates.repeat(target_mask.shape[0], 1, 1)
target_mask = sample_point(target_mask, target_coordinates, align_corners=False).squeeze(1)
pred_coordinates = point_coordinates.repeat(pred_mask.shape[0], 1, 1)
pred_mask = sample_point(pred_mask, pred_coordinates, align_corners=False).squeeze(1)
cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask)
cost_dice = pair_wise_dice_loss(pred_mask, target_mask)
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
cost_matrix = torch.minimum(cost_matrix, torch.tensor(10000000000.0))
cost_matrix = torch.maximum(cost_matrix, torch.tensor(-10000000000.0))
cost_matrix = torch.nan_to_num(cost_matrix, 0)
assigned_indices: tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
indices.append(assigned_indices)
matched_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
return matched_indices
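# Hedged sketch (illustrative addition): a toy matching call on a single batch element with random
# predictions and three ground-truth masks; cost weights mirror the documented defaults and
# num_points is reduced for speed.
matcher = Mask2FormerHungarianMatcher(cost_class=2.0, cost_mask=5.0, cost_dice=5.0, num_points=128)
masks_queries_logits = torch.randn(1, 10, 32, 32)         # (batch, num_queries, height, width)
class_queries_logits = torch.randn(1, 10, 81)             # (batch, num_queries, num_labels + 1)
mask_labels = [torch.randint(0, 2, (3, 32, 32)).float()]  # three ground-truth binary masks
class_labels = [torch.tensor([1, 7, 42])]
matched = matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
# matched[0] is an (index_i, index_j) pair selecting 3 queries and their matched target masks.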
|
class Mask2FormerHungarianMatcher(nn.Module):
'''This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
'''
def __init__(self, cost_class: float=1.0, cost_mask: float=1.0, cost_dice: float=1.0, num_points: int=12544):
'''Creates the matcher
Params:
cost_class (`float`, *optional*, defaults to 1.0):
Relative weight of the classification error in the matching cost.
cost_mask (`float`, *optional*, defaults to 1.0):
This is the relative weight of the focal loss of the binary mask in the matching cost.
cost_dice (`float`, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost.
num_points (`int`, *optional*, defaults to 12544):
No. of points to sample on which the mask loss will be calculated. The same set of K points are
uniformly sampled for all prediction and ground truth masks to construct the cost matrix for bipartite
matching.
'''
pass
@torch.no_grad()
def forward(self, masks_queries_logits: torch.Tensor, class_queries_logits: torch.Tensor, mask_labels: torch.Tensor, class_labels: torch.Tensor) -> list[tuple[Tensor]]:
'''
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the
target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target masks.
Returns:
matched_indices (`list[tuple[Tensor]]`): A list of size batch_size, containing tuples of (index_i, index_j)
where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes).
'''
pass
| 4
| 3
| 47
| 5
| 22
| 21
| 2
| 1.02
| 1
| 6
| 0
| 0
| 2
| 4
| 2
| 12
| 104
| 13
| 45
| 31
| 33
| 46
| 34
| 22
| 31
| 2
| 1
| 1
| 4
|
3,634
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss
|
from ...utils import auto_docstring, is_accelerate_available, logging
import numpy as np
from ...file_utils import ModelOutput, is_scipy_available, requires_backends
from typing import Optional, Union
from .configuration_mask2former import Mask2FormerConfig
from torch import Tensor, nn
import torch
class Mask2FormerLoss(nn.Module):
def __init__(self, config: Mask2FormerConfig, weight_dict: dict[str, float]):
"""
The Mask2Former Loss. The loss is computed very similarly to DETR. The process happens in two steps: 1) we
compute a Hungarian assignment between ground truth masks and the outputs of the model, and 2) we supervise
each pair of matched ground-truth / prediction (supervise class and mask).
Args:
config (`Mask2FormerConfig`):
The configuration for Mask2Former model also containing loss calculation specific parameters.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
"""
super().__init__()
requires_backends(self, ['scipy'])
self.num_labels = config.num_labels
self.weight_dict = weight_dict
self.eos_coef = config.no_object_weight
empty_weight = torch.ones(self.num_labels + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
self.num_points = config.train_num_points
self.oversample_ratio = config.oversample_ratio
self.importance_sample_ratio = config.importance_sample_ratio
self.matcher = Mask2FormerHungarianMatcher(cost_class=config.class_weight, cost_dice=config.dice_weight, cost_mask=config.mask_weight, num_points=self.num_points)
def _max_by_axis(self, sizes: list[list[int]]) -> list[int]:
maxes = sizes[0]
for sublist in sizes[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors])
batch_shape = [len(tensors)] + max_size
batch_size, _, height, width = batch_shape
dtype = tensors[0].dtype
device = tensors[0].device
padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device)
padding_masks = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks):
padded_tensor[:tensor.shape[0], :tensor.shape[1], :tensor.shape[2]].copy_(tensor)
padding_mask[:tensor.shape[1], :tensor.shape[2]] = False
return (padded_tensors, padding_masks)
def loss_labels(self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]) -> dict[str, Tensor]:
"""Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
"""
pred_logits = class_queries_logits
batch_size, num_queries, _ = pred_logits.shape
criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
idx = self._get_predictions_permutation_indices(indices)
target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
target_classes = torch.full((batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device)
target_classes[idx] = target_classes_o
pred_logits_transposed = pred_logits.transpose(1, 2)
loss_ce = criterion(pred_logits_transposed, target_classes)
losses = {'loss_cross_entropy': loss_ce}
return losses
def loss_masks(self, masks_queries_logits: torch.Tensor, mask_labels: list[torch.Tensor], indices: tuple[np.array], num_masks: int) -> dict[str, torch.Tensor]:
"""Compute the losses related to the masks using sigmoid_cross_entropy_loss and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
num_masks (`int`):
The number of masks, used for normalization.
Returns:
losses (`dict[str, Tensor]`): A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid cross entropy loss on the predicted and ground truth
masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
"""
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
pred_masks = masks_queries_logits[src_idx]
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
pred_masks = pred_masks[:, None]
target_masks = target_masks[:, None]
with torch.no_grad():
point_coordinates = self.sample_points_using_uncertainty(pred_masks, lambda logits: self.calculate_uncertainty(logits), self.num_points, self.oversample_ratio, self.importance_sample_ratio)
point_labels = sample_point(target_masks, point_coordinates, align_corners=False).squeeze(1)
point_logits = sample_point(pred_masks, point_coordinates, align_corners=False).squeeze(1)
losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}
del pred_masks
del target_masks
return losses
def _get_predictions_permutation_indices(self, indices):
batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
predictions_indices = torch.cat([src for src, _ in indices])
return (batch_indices, predictions_indices)
def _get_targets_permutation_indices(self, indices):
batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
target_indices = torch.cat([tgt for _, tgt in indices])
return (batch_indices, target_indices)
def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
"""
In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits'
for the foreground class in `classes`.
Args:
logits (`torch.Tensor`):
A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted
masks in all images and C is the number of foreground classes. The values are logits.
Returns:
scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
uncertain locations having the highest uncertainty score.
"""
uncertainty_scores = -torch.abs(logits)
return uncertainty_scores
def sample_points_using_uncertainty(self, logits: torch.Tensor, uncertainty_function, num_points: int, oversample_ratio: int, importance_sample_ratio: float) -> torch.Tensor:
"""
This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit
prediction as input.
Args:
logits (`torch.Tensor`):
Logit predictions for P points.
uncertainty_function:
A function that takes logit predictions for P points and returns their uncertainties.
num_points (`int`):
The number of points P to sample.
oversample_ratio (`int`):
Oversampling parameter.
importance_sample_ratio (`float`):
Ratio of points that are sampled via importance sampling.
Returns:
point_coordinates (`torch.Tensor`):
Coordinates for P sampled points.
"""
num_boxes = logits.shape[0]
num_points_sampled = int(num_points * oversample_ratio)
point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device)
point_logits = sample_point(logits, point_coordinates, align_corners=False)
point_uncertainties = uncertainty_function(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device)
idx += shift[:, None]
point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2)
if num_random_points > 0:
point_coordinates = torch.cat([point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)], dim=1)
return point_coordinates
def forward(self, masks_queries_logits: torch.Tensor, class_queries_logits: torch.Tensor, mask_labels: list[torch.Tensor], class_labels: list[torch.Tensor], auxiliary_predictions: Optional[dict[str, torch.Tensor]]=None) -> dict[str, torch.Tensor]:
"""
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
class_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, num_labels)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
if `use_auxiliary_loss` was set to `true` in [`Mask2FormerConfig`], then it contains the logits from
the inner layers of the Mask2FormerMaskedAttentionDecoder.
Returns:
losses (`dict[str, Tensor]`): A dict of `torch.Tensor` containing three keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid cross_entropy loss on the predicted and ground truth
masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
if `use_auxiliary_loss` was set to `true` in [`Mask2FormerConfig`], the dictionary contains additional
losses for each auxiliary prediction.
"""
indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
num_masks = self.get_num_masks(class_labels, device=class_labels[0].device)
losses: dict[str, Tensor] = {**self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), **self.loss_labels(class_queries_logits, class_labels, indices)}
if auxiliary_predictions is not None:
for idx, aux_outputs in enumerate(auxiliary_predictions):
masks_queries_logits = aux_outputs['masks_queries_logits']
class_queries_logits = aux_outputs['class_queries_logits']
loss_dict = self.forward(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
loss_dict = {f'{key}_{idx}': value for key, value in loss_dict.items()}
losses.update(loss_dict)
return losses
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
"""
Computes the average number of target masks across the batch, for normalization purposes.
"""
num_masks = sum([len(classes) for classes in class_labels])
num_masks = torch.as_tensor(num_masks, dtype=torch.float, device=device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_masks = reduce(num_masks)
world_size = PartialState().num_processes
num_masks = torch.clamp(num_masks / world_size, min=1)
return num_masks
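# A minimal usage sketch of the two-step loss above (Hungarian matching, then supervision),
# assuming a default Mask2FormerConfig, small random inputs, and that the class is importable
# from this module; the weighted sum at the end only illustrates how weight_dict could be applied.
import torch
from transformers import Mask2FormerConfig
from transformers.models.mask2former.modeling_mask2former import Mask2FormerLoss

config = Mask2FormerConfig()
weight_dict = {"loss_cross_entropy": config.class_weight,
               "loss_mask": config.mask_weight,
               "loss_dice": config.dice_weight}
criterion = Mask2FormerLoss(config, weight_dict)

batch_size, height, width = 2, 64, 64
masks_queries_logits = torch.randn(batch_size, config.num_queries, height, width)
class_queries_logits = torch.randn(batch_size, config.num_queries, config.num_labels + 1)
mask_labels = [torch.randint(0, 2, (3, height, width)).float() for _ in range(batch_size)]
class_labels = [torch.randint(0, config.num_labels, (3,)) for _ in range(batch_size)]

losses = criterion(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
total_loss = sum(losses[key] * weight for key, weight in weight_dict.items() if key in losses)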
|
class Mask2FormerLoss(nn.Module):
def __init__(self, config: Mask2FormerConfig, weight_dict: dict[str, float]):
'''
The Mask2Former Loss. The loss is computed very similarly to DETR. The process happens in two steps: 1) we
compute a Hungarian assignment between the ground truth masks and the outputs of the model, and 2) we supervise
each pair of matched ground-truth / prediction (supervise class and mask).
Args:
config (`Mask2FormerConfig`):
The configuration for Mask2Former model also containing loss calculation specific parameters.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
'''
pass
def _max_by_axis(self, sizes: list[list[int]]) -> list[int]:
pass
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
pass
def loss_labels(self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]) -> dict[str, Tensor]:
'''Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
'''
pass
def loss_masks(self, masks_queries_logits: torch.Tensor, mask_labels: list[torch.Tensor], indices: tuple[np.array], num_masks: int) -> dict[str, torch.Tensor]:
'''Compute the losses related to the masks using sigmoid_cross_entropy_loss and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
num_masks (`int`):
The number of masks, used for normalization.
Returns:
losses (`dict[str, Tensor]`): A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid cross entropy loss on the predicted and ground truth
masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
'''
pass
def _get_predictions_permutation_indices(self, indices):
pass
def _get_targets_permutation_indices(self, indices):
pass
def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
'''
In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits'
for the foreground class in `classes`.
Args:
logits (`torch.Tensor`):
A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted
masks in all images and C is the number of foreground classes. The values are logits.
Returns:
scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
uncertain locations having the highest uncertainty score.
'''
pass
def sample_points_using_uncertainty(self, logits: torch.Tensor, uncertainty_function, num_points: int, oversample_ratio: int, importance_sample_ratio: float) -> torch.Tensor:
'''
This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit
prediction as input.
Args:
logits (`torch.Tensor`):
Logit predictions for P points.
uncertainty_function:
A function that takes logit predictions for P points and returns their uncertainties.
num_points (`int`):
The number of points P to sample.
oversample_ratio (`int`):
Oversampling parameter.
importance_sample_ratio (`float`):
Ratio of points that are sampled via importance sampling.
Returns:
point_coordinates (`torch.Tensor`):
Coordinates for P sampled points.
'''
pass
def forward(self, masks_queries_logits: torch.Tensor, class_queries_logits: torch.Tensor, mask_labels: list[torch.Tensor], class_labels: list[torch.Tensor], auxiliary_predictions: Optional[dict[str, torch.Tensor]]=None) -> dict[str, torch.Tensor]:
'''
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, height, width)`.
class_queries_logits (`torch.Tensor`):
A tensor of shape `(batch_size, num_queries, num_labels)`.
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
if `use_auxiliary_loss` was set to `true` in [`Mask2FormerConfig`], then it contains the logits from
the inner layers of the Mask2FormerMaskedAttentionDecoder.
Returns:
losses (`dict[str, Tensor]`): A dict of `torch.Tensor` containing three keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid cross_entropy loss on the predicted and ground truth
masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
if `use_auxiliary_loss` was set to `true` in [`Mask2FormerConfig`], the dictionary contains additional
losses for each auxiliary prediction.
'''
pass
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
'''
Computes the average number of target masks across the batch, for normalization purposes.
'''
pass
| 12
| 7
| 27
| 3
| 14
| 11
| 2
| 0.77
| 1
| 12
| 2
| 0
| 11
| 7
| 11
| 21
| 310
| 39
| 154
| 91
| 120
| 119
| 108
| 69
| 96
| 3
| 1
| 2
| 19
|
3,635
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerMLPPredictionHead
|
from torch import Tensor, nn
class Mask2FormerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
"""
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
"""
super().__init__()
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
self.layers = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
activation = nn.ReLU() if i < num_layers - 1 else nn.Identity()
layer = Mask2FormerPredictionBlock(in_dim, out_dim, activation=activation)
self.layers.append(layer)
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
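# An equivalent sketch of the layer stack built above, with plain nn.Linear + activation in
# place of the Mask2FormerPredictionBlock helper (defined elsewhere in this module); the
# dimensions are illustrative.
import torch
from torch import nn

input_dim, hidden_dim, output_dim, num_layers = 256, 256, 32, 3
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
blocks = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
    blocks.append(nn.Linear(in_dim, out_dim))
    blocks.append(nn.ReLU() if i < num_layers - 1 else nn.Identity())  # last layer stays linear
mlp = nn.Sequential(*blocks)

hidden = torch.randn(100, 2, input_dim)   # e.g. (num_queries, batch_size, hidden_size)
print(mlp(hidden).shape)                  # torch.Size([100, 2, 32])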
|
class Mask2FormerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
'''
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
'''
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3
| 1
| 18
| 1
| 8
| 9
| 3
| 1.13
| 1
| 7
| 1
| 0
| 2
| 1
| 2
| 12
| 37
| 3
| 16
| 11
| 13
| 18
| 16
| 11
| 13
| 3
| 1
| 1
| 5
|
3,636
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerMaskPredictor
|
from torch import Tensor, nn
from typing import Optional, Union
import torch
class Mask2FormerMaskPredictor(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, mask_feature_size: torch.Tensor):
"""
This class is used to get the predicted mask for a given Mask2FormerMaskedAttentionDecoder layer. It also
generates the binarized attention mask associated with the given predicted mask. The attention mask obtained
using predicted mask of the (l-1)th decoder layer is fed to the cross(masked)-attention block of the next
decoder layer as input.
Args:
hidden_size (`int`):
The feature dimension of the Mask2FormerMaskedAttentionDecoder
num_heads (`int`):
The number of heads used in the Mask2FormerMaskedAttentionDecoder
mask_feature_size (`torch.Tensor`):
one of the output dimensions of the predicted masks for each query
"""
super().__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.mask_embedder = Mask2FormerMLPPredictionHead(self.hidden_size, self.hidden_size, mask_feature_size)
def forward(self, outputs: torch.Tensor, pixel_embeddings: torch.Tensor, attention_mask_target_size: Optional[int]=None):
mask_embeddings = self.mask_embedder(outputs.transpose(0, 1))
outputs_mask = torch.einsum('bqc, bchw -> bqhw', mask_embeddings, pixel_embeddings)
attention_mask = nn.functional.interpolate(outputs_mask, size=attention_mask_target_size, mode='bilinear', align_corners=False)
attention_mask = attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1)
attention_mask = (attention_mask.flatten(0, 1) < 0.5).bool()
attention_mask = attention_mask.detach()
return (outputs_mask, attention_mask)
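# A small sketch of the mask / attention-mask construction performed in forward above, with a
# plain Linear standing in for the MLP mask embedder; all sizes are illustrative.
import torch
from torch import nn

batch_size, num_queries, hidden_size, mask_dim, num_heads = 2, 100, 256, 32, 8
outputs = torch.randn(num_queries, batch_size, hidden_size)       # decoder output (Q, B, C)
pixel_embeddings = torch.randn(batch_size, mask_dim, 96, 96)      # 1/4-scale pixel features
mask_embedder = nn.Linear(hidden_size, mask_dim)                  # stand-in for the MLP head

mask_embeddings = mask_embedder(outputs.transpose(0, 1))          # (B, Q, mask_dim)
outputs_mask = torch.einsum("bqc, bchw -> bqhw", mask_embeddings, pixel_embeddings)
attention_mask = nn.functional.interpolate(outputs_mask, size=(24, 24), mode="bilinear", align_corners=False)
attention_mask = attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, num_heads, 1, 1)
attention_mask = (attention_mask.flatten(0, 1) < 0.5).bool().detach()  # True = masked out
print(outputs_mask.shape, attention_mask.shape)  # (2, 100, 96, 96) (16, 100, 576)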
|
class Mask2FormerMaskPredictor(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, mask_feature_size: torch.Tensor):
'''
This class is used to get the predicted mask for a given Mask2FormerMaskedAttentionDecoder layer. It also
generates the binarized attention mask associated with the given predicted mask. The attention mask obtained
using predicted mask of the (l-1)th decoder layer is fed to the cross(masked)-attention block of the next
decoder layer as input.
Args:
hidden_size (`int`):
The feature dimension of the Mask2FormerMaskedAttentionDecoder
num_heads (`int`):
The number of heads used in the Mask2FormerMaskedAttentionDecoder
mask_feature_size (`torch.Tensor`):
one of the output dimensions of the predicted masks for each query
'''
pass
def forward(self, outputs: torch.Tensor, pixel_embeddings: torch.Tensor, attention_mask_target_size: Optional[int]=None):
pass
| 3
| 1
| 23
| 4
| 12
| 8
| 2
| 0.63
| 1
| 6
| 1
| 0
| 2
| 3
| 2
| 12
| 47
| 8
| 24
| 13
| 21
| 15
| 21
| 13
| 18
| 3
| 1
| 2
| 4
|
3,637
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerMaskedAttentionDecoder
|
from torch import Tensor, nn
from .configuration_mask2former import Mask2FormerConfig
from typing import Optional, Union
import torch
class Mask2FormerMaskedAttentionDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`Mask2FormerMaskedAttentionDecoderLayer`]. The decoder updates the query embeddings through multiple cross
(masked) and self-attention layers. The decoder uses a new **masked attention** mechanism instead of the standard
cross-attention, which extracts localized features by constraining cross-attention to within the foreground region
of the predicted mask for each query, instead of attending to the full feature map.
Args:
config (`Mask2FormerConfig`):
Configuration used to instantiate Mask2FormerMaskedAttentionDecoder.
"""
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.config = config
self.mask_feature_size = config.mask_feature_size
self.dropout = config.dropout
self.layerdrop = config.dropout
self.num_feature_levels = 3
self.decoder_layers = config.decoder_layers - 1
self.layers = nn.ModuleList([Mask2FormerMaskedAttentionDecoderLayer(self.config) for _ in range(self.decoder_layers)])
self.layernorm = nn.LayerNorm(config.hidden_dim)
self.mask_predictor = Mask2FormerMaskPredictor(hidden_size=config.hidden_dim, num_heads=config.num_attention_heads, mask_feature_size=self.mask_feature_size)
self.gradient_checkpointing = False
def forward(self, inputs_embeds: Optional[torch.Tensor]=None, multi_stage_positional_embeddings: Optional[torch.Tensor]=None, pixel_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, feature_size_list: Optional[list]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(num_queries, batch_size, hidden_size)`):
The query embeddings that are passed into the decoder.
multi_stage_positional_embeddings (`torch.FloatTensor` of shape `(height*width, batch_size, num_channels)`):
Position embeddings that are added to the keys in each cross(masked)-attention layer.
pixel_embeddings (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel
Decoder.
query_position_embeddings (`torch.FloatTensor` of shape `(num_queries, batch_size, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross(masked)-attention of the decoder.
feature_size_list (`list[torch.Size]`):
This is a list containing shapes (height & width) of multi-scale features from the Pixel Decoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
intermediate = ()
all_hidden_states = () if output_hidden_states else None
attentions = () if output_attentions else None
intermediate_mask_predictions = ()
intermediate_hidden_states = self.layernorm(inputs_embeds)
intermediate += (intermediate_hidden_states,)
predicted_mask, attention_mask = self.mask_predictor(intermediate_hidden_states, pixel_embeddings, feature_size_list[0])
intermediate_mask_predictions += (predicted_mask,)
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = torch.rand([])
if self.training and dropout_probability < self.layerdrop:
continue
level_index = idx % self.num_feature_levels
where = (attention_mask.sum(-1) != attention_mask.shape[-1]).to(attention_mask.dtype)
attention_mask = attention_mask * where.unsqueeze(-1)
layer_outputs = decoder_layer(hidden_states, level_index, None, multi_stage_positional_embeddings, query_position_embeddings, encoder_hidden_states, encoder_attention_mask=attention_mask, output_attentions=output_attentions)
intermediate_hidden_states = self.layernorm(layer_outputs[0])
predicted_mask, attention_mask = self.mask_predictor(intermediate_hidden_states, pixel_embeddings, feature_size_list[(idx + 1) % self.num_feature_levels])
intermediate_mask_predictions += (predicted_mask,)
intermediate += (intermediate_hidden_states,)
hidden_states = layer_outputs[0]
if output_attentions:
attentions += (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
hidden_states = hidden_states.transpose(1, 0)
if not return_dict:
outputs = [hidden_states, all_hidden_states, attentions, intermediate, intermediate_mask_predictions]
return tuple((v for v in outputs if v is not None))
return Mask2FormerMaskedAttentionDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=attentions, intermediate_hidden_states=intermediate, masks_queries_logits=intermediate_mask_predictions)
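# A reduced sketch of the fallback applied before every decoder layer above: if a query's
# predicted mask would hide every key (a row of all-True entries), the row is zeroed so that
# query falls back to attending over the full feature map; the tensors are fabricated.
import torch

attention_mask = torch.ones(16, 100, 576, dtype=torch.bool)   # (batch*heads, queries, keys), True = masked
attention_mask[:, 1:, :300] = False                            # query 0 is left fully masked on purpose

where = (attention_mask.sum(-1) != attention_mask.shape[-1]).to(attention_mask.dtype)
attention_mask = attention_mask * where.unsqueeze(-1)
print(attention_mask[0, 0].any())   # tensor(False) -> the fully-masked query now attends everywhere
print(attention_mask[0, 1].any())   # tensor(True)  -> partially-masked queries keep their mask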
|
class Mask2FormerMaskedAttentionDecoder(nn.Module):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`Mask2FormerMaskedAttentionDecoderLayer`]. The decoder updates the query embeddings through multiple cross
(masked) and self-attention layers. The decoder uses a new **masked attention** mechanism instead of the standard
cross-attention, which extracts localized features by constraining cross-attention to within the foreground region
of the predicted mask for each query, instead of attending to the full feature map.
Args:
config (`Mask2FormerConfig`):
Configuration used to instantiate Mask2FormerMaskedAttentionDecoder.
'''
def __init__(self, config: Mask2FormerConfig):
pass
def forward(self, inputs_embeds: Optional[torch.Tensor]=None, multi_stage_positional_embeddings: Optional[torch.Tensor]=None, pixel_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, feature_size_list: Optional[list]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(num_queries, batch_size, hidden_size)`):
The query embeddings that are passed into the decoder.
multi_stage_positional_embeddings (`torch.FloatTensor` of shape `(height*width, batch_size, num_channels)`):
Position embeddings that are added to the keys in each cross(masked)-attention layer.
pixel_embeddings (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel
Decoder.
query_position_embeddings (`torch.FloatTensor` of shape `(num_queries, batch_size, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross(masked)-attention of the decoder.
feature_size_list (`list[torch.Size]`):
This is a list containing shapes (height & width) of multi-scale features from the Pixel Decoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 79
| 13
| 50
| 16
| 8
| 0.42
| 1
| 10
| 4
| 0
| 2
| 10
| 2
| 12
| 171
| 29
| 101
| 38
| 87
| 42
| 53
| 26
| 50
| 14
| 1
| 2
| 15
|
3,638
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerMaskedAttentionDecoderLayer
|
from typing import Optional, Union
from ...activations import ACT2FN
from .configuration_mask2former import Mask2FormerConfig
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
class Mask2FormerMaskedAttentionDecoderLayer(GradientCheckpointingLayer):
"""
The Mask2FormerMaskedAttentionDecoderLayer is made up of self-attention, cross (masked) attention as well as FFN
blocks. The cross attention block used as part of `Mask2FormerMaskedAttentionDecoderLayer` is actually a `masked
attention` block that restricts the attention to localized features centered around predicted segments which leads
to faster convergence and improved performance. The order of self and cross (i.e. masked) attention blocks has
also been swapped in Mask2FormerMaskedAttentionDecoder compared to a standard DetrDecoder as an optimization
improvement.
Args:
config (`Mask2FormerConfig`):
The configuration used to initialize the Mask2FormerMaskedAttentionDecoder.
"""
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.config = config
self.embed_dim = self.config.hidden_dim
self.pre_norm = self.config.pre_norm
self.self_attn = Mask2FormerAttention(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.dropout, is_decoder=True)
self.dropout = self.config.dropout
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout = self.config.dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.cross_attn = nn.MultiheadAttention(self.embed_dim, self.config.num_attention_heads, self.config.dropout)
self.cross_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.config.dim_feedforward)
self.fc2 = nn.Linear(self.config.dim_feedforward, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
cross_attn_weights = None
self_attn_weights = None
residual = hidden_states
hidden_states, cross_attn_weights = self.cross_attn(query=self.with_pos_embed(hidden_states, query_position_embeddings), key=self.with_pos_embed(encoder_hidden_states[level_index], position_embeddings[level_index]), value=encoder_hidden_states[level_index], attn_mask=encoder_attention_mask, key_padding_mask=None)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.cross_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=query_position_embeddings, attention_mask=None, output_attentions=True)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
def forward_pre(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
cross_attn_weights = None
self_attn_weights = None
residual = hidden_states
hidden_states = self.cross_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.cross_attn(query=self.with_pos_embed(hidden_states, query_position_embeddings), key=self.with_pos_embed(encoder_hidden_states[level_index], position_embeddings[level_index]), value=encoder_hidden_states[level_index], attn_mask=encoder_attention_mask, key_padding_mask=None)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=query_position_embeddings, attention_mask=None, output_attentions=True)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
def forward(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(1, seq_len, tgt_len, src_len)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the keys in the masked-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
Cross attention input to the layer of shape `(seq_len, batch, embed_dim)`.
encoder_attention_mask (`torch.FloatTensor`):
Encoder attention mask of size `(1, seq_len, tgt_len, src_len)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
if self.pre_norm:
outputs = self.forward_pre(hidden_states=hidden_states, level_index=level_index, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
else:
outputs = self.forward_post(hidden_states=hidden_states, level_index=level_index, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
return outputs
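# A reduced sketch of the ordering that the pre_norm flag toggles in forward above, shown for a
# single sublayer with a plain Linear standing in for the attention block; shapes are illustrative.
import torch
from torch import nn

embed_dim = 256
sublayer, layer_norm = nn.Linear(embed_dim, embed_dim), nn.LayerNorm(embed_dim)
hidden_states = torch.randn(10, 2, embed_dim)

post_norm_out = layer_norm(hidden_states + sublayer(hidden_states))  # pre_norm=False: sublayer, add, then norm
pre_norm_out = hidden_states + sublayer(layer_norm(hidden_states))   # pre_norm=True: norm, sublayer, then add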
|
class Mask2FormerMaskedAttentionDecoderLayer(GradientCheckpointingLayer):
'''
The Mask2FormerMaskedAttentionDecoderLayer is made up of self-attention, cross (masked) attention as well as FFN
blocks. The cross attention block used as part of `Mask2FormerMaskedAttentionDecoderLayer` is actually a `masked
attention` block that restricts the attention to localized features centered around predicted segments which leads
to faster convergence and improved performance. The order of self and cross (i.e. masked) attention blocks has
also been swapped in Mask2FormerMaskedAttentionDecoder compared to a standard DetrDecoder as an optimization
improvement.
Args:
config (`Mask2FormerConfig`):
The configuration used to initialize the Mask2FormerMaskedAttentionDecoder.
'''
def __init__(self, config: Mask2FormerConfig):
pass
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
pass
def forward_post(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
pass
def forward_pre(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
pass
def forward(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
'''
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(1, seq_len, tgt_len, src_len)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the keys in the masked-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
Cross attention input to the layer of shape `(seq_len, batch, embed_dim)`.
encoder_attention_mask (`torch.FloatTensor`):
Encoder attention mask of size `(1, seq_len, tgt_len, src_len)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 6
| 2
| 39
| 5
| 29
| 5
| 2
| 0.24
| 1
| 6
| 2
| 0
| 5
| 13
| 5
| 15
| 212
| 32
| 145
| 58
| 109
| 35
| 71
| 28
| 65
| 2
| 1
| 1
| 9
|
3,639
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerModel
|
from ...utils import auto_docstring, is_accelerate_available, logging
from typing import Optional, Union
from .configuration_mask2former import Mask2FormerConfig
from torch import Tensor, nn
import torch
@auto_docstring
class Mask2FormerModel(Mask2FormerPreTrainedModel):
main_input_name = 'pixel_values'
def __init__(self, config: Mask2FormerConfig):
super().__init__(config)
self.pixel_level_module = Mask2FormerPixelLevelModule(config)
self.transformer_module = Mask2FormerTransformerModule(in_features=config.feature_size, config=config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Tensor, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Mask2FormerModelOutput:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, _, height, width = pixel_values.shape
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
pixel_level_module_output = self.pixel_level_module(pixel_values=pixel_values, output_hidden_states=output_hidden_states)
transformer_module_output = self.transformer_module(multi_scale_features=pixel_level_module_output.decoder_hidden_states, mask_features=pixel_level_module_output.decoder_last_hidden_state, output_hidden_states=True, output_attentions=output_attentions)
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
transformer_decoder_intermediate_states = None
if output_hidden_states:
encoder_hidden_states = pixel_level_module_output.encoder_hidden_states
pixel_decoder_hidden_states = pixel_level_module_output.decoder_hidden_states
transformer_decoder_hidden_states = transformer_module_output.hidden_states
transformer_decoder_intermediate_states = transformer_module_output.intermediate_hidden_states
output = Mask2FormerModelOutput(encoder_last_hidden_state=pixel_level_module_output.encoder_last_hidden_state, pixel_decoder_last_hidden_state=pixel_level_module_output.decoder_last_hidden_state, transformer_decoder_last_hidden_state=transformer_module_output.last_hidden_state, encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, transformer_decoder_intermediate_states=transformer_decoder_intermediate_states, attentions=transformer_module_output.attentions, masks_queries_logits=transformer_module_output.masks_queries_logits)
if not return_dict:
output = tuple((v for v in output.values() if v is not None))
return output
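# A minimal inference sketch for the model above; the checkpoint name and image URL are
# illustrative assumptions, and the matching image processor is loaded from the same checkpoint.
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerModel

checkpoint = "facebook/mask2former-swin-small-coco-instance"  # assumed public checkpoint
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = Mask2FormerModel.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = image_processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
print(outputs.transformer_decoder_last_hidden_state.shape)  # (batch_size, num_queries, hidden_dim)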
|
@auto_docstring
class Mask2FormerModel(Mask2FormerPreTrainedModel):
def __init__(self, config: Mask2FormerConfig):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Mask2FormerModelOutput:
pass
| 5
| 0
| 46
| 8
| 27
| 12
| 4
| 0.42
| 1
| 8
| 4
| 0
| 2
| 2
| 2
| 3
| 98
| 17
| 57
| 22
| 45
| 24
| 29
| 14
| 26
| 7
| 2
| 1
| 8
|
3,640
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerModelOutput
|
from ...utils import auto_docstring, is_accelerate_available, logging
from ...file_utils import ModelOutput, is_scipy_available, requires_backends
from typing import Optional, Union
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`Mask2FormerModel`]. This class returns all the needed hidden states to compute the logits.\n ')
class Mask2FormerModelOutput(ModelOutput):
"""
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`, *optional*):
Last hidden states (final feature map) of the last stage of the encoder model (backbone). Returned when
`output_hidden_states=True` is passed.
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`, *optional*):
Last hidden states (final feature map) of the last stage of the pixel decoder model.
transformer_decoder_last_hidden_state (`tuple(torch.FloatTensor)`):
Final output of the transformer decoder `(batch_size, sequence_length, hidden_size)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage. Returned when `output_hidden_states=True` is passed.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage. Returned when `output_hidden_states=True` is passed.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage. Returned when `output_hidden_states=True` is passed.
transformer_decoder_intermediate_states (`tuple(torch.FloatTensor)` of shape `(num_queries, 1, hidden_size)`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
masks_queries_logits (`tuple(torch.FloatTensor)` of shape `(batch_size, num_queries, height, width)`):
Mask Predictions from each layer in the transformer decoder.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self attentions weights from transformer decoder.
"""
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_intermediate_states: Optional[tuple[torch.FloatTensor]] = None
masks_queries_logits: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
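# A short sketch of how this output container behaves, assuming it is importable from this
# module; like other ModelOutput subclasses, fields left as None are dropped by to_tuple().
import torch
from transformers.models.mask2former.modeling_mask2former import Mask2FormerModelOutput

output = Mask2FormerModelOutput(transformer_decoder_last_hidden_state=torch.zeros(1, 100, 256))
print(output.transformer_decoder_last_hidden_state.shape)  # attribute access: torch.Size([1, 100, 256])
print(len(output.to_tuple()))                              # only the one non-None field -> 1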
| null
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 2
| 10
| 10
| 9
| 31
| 10
| 10
| 9
| 0
| 1
| 0
| 0
|
3,641
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoder
|
from torch import Tensor, nn
from .configuration_mask2former import Mask2FormerConfig
import numpy as np
import torch
class Mask2FormerPixelDecoder(nn.Module):
def __init__(self, config: Mask2FormerConfig, feature_channels):
super().__init__()
self.config = config
feature_dim = config.feature_size
mask_dim = config.mask_feature_size
num_pos_features = feature_dim // 2
self.position_embedding = Mask2FormerSinePositionEmbedding(num_pos_feats=num_pos_features, normalize=True)
self.num_feature_levels = 3
transformer_in_channels = feature_channels[-self.num_feature_levels:]
self.transformer_feature_strides = config.feature_strides[-self.num_feature_levels:]
self.feature_channels = feature_channels
self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, feature_dim))
if self.num_feature_levels > 1:
input_projections_list = []
for in_channels in transformer_in_channels[::-1]:
input_projections_list.append(nn.Sequential(nn.Conv2d(in_channels, feature_dim, kernel_size=1), nn.GroupNorm(32, feature_dim)))
self.input_projections = nn.ModuleList(input_projections_list)
else:
self.input_projections = nn.ModuleList([nn.Sequential(nn.Conv2d(transformer_in_channels[-1], feature_dim, kernel_size=1), nn.GroupNorm(32, feature_dim))])
self.encoder = Mask2FormerPixelDecoderEncoderOnly(config)
self.mask_projection = nn.Conv2d(feature_dim, mask_dim, kernel_size=1, stride=1, padding=0)
stride = min(self.transformer_feature_strides)
self.common_stride = config.common_stride
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
lateral_conv = nn.Sequential(nn.Conv2d(in_channels, feature_dim, kernel_size=1, bias=False), nn.GroupNorm(32, feature_dim))
output_conv = nn.Sequential(nn.Conv2d(feature_dim, feature_dim, kernel_size=3, stride=1, padding=1, bias=False), nn.GroupNorm(32, feature_dim), nn.ReLU())
self.add_module(f'adapter_{idx + 1}', lateral_conv)
self.add_module(f'layer_{idx + 1}', output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.lateral_convolutions = lateral_convs[::-1]
self.output_convolutions = output_convs[::-1]
def get_valid_ratio(self, mask, dtype=torch.float32):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(~mask[:, :, 0], 1)
valid_width = torch.sum(~mask[:, 0, :], 1)
valid_ratio_height = valid_height.to(dtype) / height
valid_ratio_width = valid_width.to(dtype) / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def forward(self, features, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
input_embeds = []
position_embeddings = []
for level, x in enumerate(features[::-1][:self.num_feature_levels]):
input_embeds.append(self.input_projections[level](x))
position_embeddings.append(self.position_embedding(x.shape, x.device, x.dtype))
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in input_embeds]
spatial_shapes_list = [(embed.shape[2], embed.shape[3]) for embed in input_embeds]
input_embeds_flat = torch.cat([embed.flatten(2).transpose(1, 2) for embed in input_embeds], 1)
spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long, device=input_embeds_flat.device)
masks_flat = torch.cat([mask.flatten(1) for mask in masks], 1)
position_embeddings = [embed.flatten(2).transpose(1, 2) for embed in position_embeddings]
level_pos_embed_flat = [x + self.level_embed[i].view(1, 1, -1) for i, x in enumerate(position_embeddings)]
level_pos_embed_flat = torch.cat(level_pos_embed_flat, 1)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(mask, dtype=input_embeds_flat.dtype) for mask in masks], 1)
if encoder_outputs is None:
encoder_outputs = self.encoder(inputs_embeds=input_embeds_flat, attention_mask=masks_flat, position_embeddings=level_pos_embed_flat, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs.last_hidden_state
batch_size = last_hidden_state.shape[0]
level_start_index_list = [0]
for height, width in spatial_shapes_list[:-1]:
level_start_index_list.append(level_start_index_list[-1] + height * width)
split_sizes = [None] * self.num_feature_levels
for i in range(self.num_feature_levels):
if i < self.num_feature_levels - 1:
split_sizes[i] = level_start_index_list[i + 1] - level_start_index_list[i]
else:
split_sizes[i] = last_hidden_state.shape[1] - level_start_index_list[i]
encoder_output = torch.split(last_hidden_state, split_sizes, dim=1)
outputs = [x.transpose(1, 2).view(batch_size, -1, spatial_shapes_list[i][0], spatial_shapes_list[i][1]) for i, x in enumerate(encoder_output)]
for idx, feature in enumerate(features[:self.num_fpn_levels][::-1]):
lateral_conv = self.lateral_convolutions[idx]
output_conv = self.output_convolutions[idx]
current_fpn = lateral_conv(feature)
out = current_fpn + nn.functional.interpolate(outputs[-1], size=current_fpn.shape[-2:], mode='bilinear', align_corners=False)
out = output_conv(out)
outputs.append(out)
num_cur_levels = 0
multi_scale_features = []
for out in outputs:
if num_cur_levels < self.num_feature_levels:
multi_scale_features.append(out)
num_cur_levels += 1
return Mask2FormerPixelDecoderOutput(mask_features=self.mask_projection(outputs[-1]), multi_scale_features=tuple(multi_scale_features), attentions=encoder_outputs.attentions)
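# A small sketch of the multi-scale flattening bookkeeping used in forward above: each level's
# feature map is flattened and concatenated along the sequence axis, and level_start_index
# records where each level begins; the shapes are illustrative.
import torch

spatial_shapes_list = [(32, 32), (16, 16), (8, 8)]                 # three feature levels
features = [torch.randn(2, 256, h, w) for h, w in spatial_shapes_list]

flat = torch.cat([f.flatten(2).transpose(1, 2) for f in features], dim=1)
spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
print(flat.shape)           # torch.Size([2, 1344, 256])  (1024 + 256 + 64 tokens)
print(level_start_index)    # tensor([   0, 1024, 1280])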
|
class Mask2FormerPixelDecoder(nn.Module):
def __init__(self, config: Mask2FormerConfig, feature_channels):
pass
def get_valid_ratio(self, mask, dtype=torch.float32):
'''Get the valid ratio of all feature maps.'''
pass
def forward(self, features, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):
pass
| 4
| 1
| 60
| 9
| 46
| 4
| 5
| 0.09
| 1
| 11
| 4
| 0
| 3
| 13
| 3
| 13
| 182
| 30
| 140
| 67
| 129
| 12
| 89
| 60
| 85
| 11
| 1
| 2
| 16
|
3,642
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderEncoderLayer
|
from torch import Tensor, nn
from .configuration_mask2former import Mask2FormerConfig
from typing import Optional, Union
import torch
class Mask2FormerPixelDecoderEncoderLayer(nn.Module):
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.embed_dim = config.feature_size
self.self_attn = Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, n_levels=3, n_points=4)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = nn.functional.relu
self.activation_dropout = config.dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim)
self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of the backbone feature maps as a list of tuples.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights.transpose(1, 0),)
return outputs
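# A reduced sketch of the overflow guard at the end of forward above: during training, if any
# activation became inf/nan (which can happen under float16), values are clamped just inside the
# dtype's finite range; the input tensor here is fabricated to trigger the branch.
import torch

hidden_states = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000   # 65504 - 1000 for float16
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(hidden_states)   # the infinities are now finite values close to +/- 64504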
|
class Mask2FormerPixelDecoderEncoderLayer(nn.Module):
def __init__(self, config: Mask2FormerConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
'''
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of the backbone feature maps as a list of tuples.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 43
| 5
| 28
| 10
| 3
| 0.33
| 1
| 5
| 2
| 0
| 2
| 9
| 2
| 12
| 87
| 11
| 57
| 25
| 45
| 19
| 33
| 16
| 30
| 4
| 1
| 2
| 5
|
3,643
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention
|
from torch import Tensor, nn
import warnings
from typing import Optional, Union
import torch
class Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}')
dim_per_head = embed_dim // num_heads
if not ((dim_per_head & (dim_per_head - 1)) == 0 and dim_per_head != 0):
warnings.warn("You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the dimension of each attention head a power of 2 which is more efficient in the authors' CUDA implementation.")
self.im2col_step = 128
self.d_model = embed_dim
self.n_levels = n_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
total_elements = sum((height * width for height, width in spatial_shapes_list))
if total_elements != sequence_length:
raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states')
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
value = value.masked_fill(attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points)
attention_weights = nn.functional.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points)
if reference_points.shape[-1] == 2:
offset_normalizer = torch.tensor([[shape[1], shape[0]] for shape in spatial_shapes_list], dtype=torch.long, device=reference_points.device)
sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
output = multi_scale_deformable_attention(value, spatial_shapes_list, sampling_locations, attention_weights)
output = self.output_proj(output)
return (output, attention_weights)
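A small illustrative sketch (made-up shapes, not part of the module) of the `reference_points.shape[-1] == 2` branch above: predicted offsets are expressed in pixels of each feature level, so they are divided by `(width, height)` before being added to the normalized reference points.
```python
import torch

batch, queries, heads, levels, points = 1, 2, 1, 2, 1
spatial_shapes_list = [(8, 8), (4, 4)]                    # (height, width) per level
reference_points = torch.rand(batch, queries, levels, 2)  # normalized (x, y) in [0, 1]
sampling_offsets = torch.randn(batch, queries, heads, levels, points, 2)

# normalize the per-level pixel offsets by (width, height) of each level
offset_normalizer = torch.tensor([[w, h] for h, w in spatial_shapes_list], dtype=torch.float)
sampling_locations = (
    reference_points[:, :, None, :, None, :]
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
print(sampling_locations.shape)  # (batch, queries, heads, levels, points, 2)
```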
|
class Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module):
'''
Multiscale deformable attention as proposed in Deformable DETR.
'''
def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
pass
| 4
| 1
| 30
| 2
| 26
| 1
| 4
| 0.09
| 1
| 6
| 0
| 0
| 3
| 9
| 3
| 13
| 96
| 10
| 79
| 34
| 64
| 7
| 42
| 23
| 38
| 6
| 1
| 1
| 11
|
3,644
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderEncoderOnly
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions
from torch import Tensor, nn
from .configuration_mask2former import Mask2FormerConfig
import torch
class Mask2FormerPixelDecoderEncoderOnly(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`Mask2FormerPixelDecoderEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through
multiple deformable attention layers.
Args:
config: Mask2FormerConfig
"""
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.layers = nn.ModuleList([Mask2FormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)])
@staticmethod
def get_reference_points(spatial_shapes_list, valid_ratios, device):
"""
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of the backbone feature maps as a list of tuples.
valid_ratios (`torch.FloatTensor`):
Valid ratios of each feature map, has shape of `(batch_size, num_feature_levels, 2)`.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
"""
reference_points_list = []
for lvl, (height, width) in enumerate(spatial_shapes_list):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), indexing='ij')
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes_list=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of each feature map as a list of tuples.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
reference_points = self.get_reference_points(spatial_shapes_list, valid_ratios, device=inputs_embeds.device)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states.transpose(1, 0),)
layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states.transpose(1, 0),)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)
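A toy walk-through (illustrative values only) of `get_reference_points` above: the center of every cell of each feature map is normalized by the map size and the per-level valid ratios, then broadcast across levels.
```python
import torch

spatial_shapes_list = [(2, 3), (1, 2)]                     # (height, width) per level
valid_ratios = torch.ones(1, len(spatial_shapes_list), 2)  # batch_size=1, fully valid feature maps

reference_points_list = []
for lvl, (height, width) in enumerate(spatial_shapes_list):
    ref_y, ref_x = torch.meshgrid(
        torch.linspace(0.5, height - 0.5, height),
        torch.linspace(0.5, width - 0.5, width),
        indexing="ij",
    )
    ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)
    ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)
    reference_points_list.append(torch.stack((ref_x, ref_y), -1))

reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
print(reference_points.shape)  # (1, 2*3 + 1*2, num_levels, 2) == (1, 8, 2, 2)
```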
|
class Mask2FormerPixelDecoderEncoderOnly(nn.Module):
'''
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`Mask2FormerPixelDecoderEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through
multiple deformable attention layers.
Args:
config: Mask2FormerConfig
'''
def __init__(self, config: Mask2FormerConfig):
pass
@staticmethod
def get_reference_points(spatial_shapes_list, valid_ratios, device):
'''
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of the backbone feature maps as a list of tuples.
valid_ratios (`torch.FloatTensor`):
Valid ratios of each feature map, has shape of `(batch_size, num_feature_levels, 2)`.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
'''
pass
def forward(self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes_list=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of each feature map as a list of tuples.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 5
| 3
| 38
| 4
| 21
| 13
| 4
| 0.69
| 1
| 6
| 3
| 0
| 2
| 3
| 3
| 13
| 126
| 16
| 65
| 31
| 49
| 45
| 35
| 18
| 31
| 10
| 1
| 2
| 13
|
3,645
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderOutput
|
from ...utils import auto_docstring, is_accelerate_available, logging
from ...file_utils import ModelOutput, is_scipy_available, requires_backends
from typing import Optional, Union
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro="\n Mask2Former's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns\n the mask features and the multiscale features.\n ")
class Mask2FormerPixelDecoderOutput(ModelOutput):
"""
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights from the pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True`.
"""
multi_scale_features: Optional[tuple[torch.FloatTensor]] = None
mask_features: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Mask2Former's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns\n the mask features and the multiscale features.\n ")
class Mask2FormerPixelDecoderOutput(ModelOutput):
'''
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights from the pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True`.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 2
| 4
| 4
| 3
| 15
| 4
| 4
| 3
| 0
| 1
| 0
| 0
|
3,646
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelLevelModule
|
from torch import Tensor, nn
from .configuration_mask2former import Mask2FormerConfig
from ...utils.backbone_utils import load_backbone
class Mask2FormerPixelLevelModule(nn.Module):
def __init__(self, config: Mask2FormerConfig):
"""
Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
Segmentation](https://huggingface.co/papers/2112.01527). It runs the input image through a backbone and a pixel
decoder, generating multi-scale feature maps and pixel embeddings.
Args:
config ([`Mask2FormerConfig`]):
The configuration used to instantiate this model.
"""
super().__init__()
self.encoder = load_backbone(config)
self.decoder = Mask2FormerPixelDecoder(config, feature_channels=self.encoder.channels)
def forward(self, pixel_values: Tensor, output_hidden_states: bool=False) -> Mask2FormerPixelLevelModuleOutput:
backbone_features = self.encoder(pixel_values).feature_maps
decoder_output = self.decoder(backbone_features, output_hidden_states=output_hidden_states)
return Mask2FormerPixelLevelModuleOutput(encoder_last_hidden_state=backbone_features[-1], encoder_hidden_states=tuple(backbone_features) if output_hidden_states else None, decoder_last_hidden_state=decoder_output.mask_features, decoder_hidden_states=decoder_output.multi_scale_features)
|
class Mask2FormerPixelLevelModule(nn.Module):
def __init__(self, config: Mask2FormerConfig):
'''
Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
Segmentation](https://huggingface.co/papers/2112.01527). It runs the input image through a backbone and a pixel
decoder, generating multi-scale feature maps and pixel embeddings.
Args:
config ([`Mask2FormerConfig`]):
The configuration used to instantiate this model.
'''
pass
def forward(self, pixel_values: Tensor, output_hidden_states: bool=False) -> Mask2FormerPixelLevelModuleOutput:
pass
| 3
| 1
| 12
| 2
| 7
| 4
| 2
| 0.57
| 1
| 7
| 3
| 0
| 2
| 2
| 2
| 12
| 26
| 4
| 14
| 7
| 11
| 8
| 9
| 7
| 6
| 2
| 1
| 0
| 3
|
3,647
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelLevelModuleOutput
|
from ...utils import auto_docstring, is_accelerate_available, logging
from ...file_utils import ModelOutput, is_scipy_available, requires_backends
from typing import Optional, Union
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro="\n Mask2Former's pixel level module output. It returns the output of the encoder (optional) and all hidden states\n (multi-scale features) from the `decoder`. By default, the `encoder` is a Swin Backbone and the `decoder` is a\n Multi-Scale Deformable Attention based decoder.\n\n The `decoder_last_hidden_state` are the **per-pixel embeddings** while `decoder_hidden_states` refer to multi-scale\n feature maps produced using **multi-scaling strategy** defined in the paper.\n ")
class Mask2FormerPixelLevelModuleOutput(ModelOutput):
"""
encoder_last_hidden_state (`torch.FloatTensor`):
Last hidden states (final feature map of shape `(batch_size, num_channels, height, width)`) of the last
stage of the encoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden states (also
called feature maps) of the model at the output of each stage. Returned if output_hidden_states is set to
True.
decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1/4 scale features from the last Pixel Decoder Layer.
decoder_hidden_states (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden states (also
called feature maps) of the model at the output of each stage.
"""
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Mask2Former's pixel level module output. It returns the output of the encoder (optional) and all hidden states\n (multi-scale features) from the `decoder`. By default, the `encoder` is a Swin Backbone and the `decoder` is a\n Multi-Scale Deformable Attention based decoder.\n\n The `decoder_last_hidden_state` are the **per-pixel embeddings** while `decoder_hidden_states` refer to multi-scale\n feature maps produced using **multi-scaling strategy** defined in the paper.\n ")
class Mask2FormerPixelLevelModuleOutput(ModelOutput):
'''
encoder_last_hidden_state (`torch.FloatTensor`):
Last hidden states (final feature map of shape `(batch_size, num_channels, height, width)`) of the last
stage of the encoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden states (also
called feature maps) of the model at the output of each stage. Returned if output_hidden_states is set to
True.
decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1/4 scale features from the last Pixel Decoder Layer.
decoder_hidden_states (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden states (also
called feature maps) of the model at the output of each stage.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 3
| 5
| 5
| 4
| 20
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
3,648
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPreTrainedModel
|
import math
from ...utils import auto_docstring, is_accelerate_available, logging
from ...modeling_utils import PreTrainedModel
from .configuration_mask2former import Mask2FormerConfig
from torch import Tensor, nn
import torch
@auto_docstring
class Mask2FormerPreTrainedModel(PreTrainedModel):
config: Mask2FormerConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
def _init_weights(self, module: nn.Module):
xavier_std = self.config.init_xavier_std
std = self.config.init_std
if isinstance(module, Mask2FormerTransformerModule):
if module.input_projections is not None:
for input_projection in module.input_projections:
if not isinstance(input_projection, nn.Sequential):
nn.init.xavier_uniform_(input_projection.weight, gain=xavier_std)
nn.init.constant_(input_projection.bias, 0)
elif isinstance(module, Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention):
nn.init.constant_(module.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(module.n_heads, 1, 1, 2).repeat(1, module.n_levels, module.n_points, 1)
for i in range(module.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
nn.init.constant_(module.attention_weights.weight.data, 0.0)
nn.init.constant_(module.attention_weights.bias.data, 0.0)
nn.init.xavier_uniform_(module.value_proj.weight.data)
nn.init.constant_(module.value_proj.bias.data, 0.0)
nn.init.xavier_uniform_(module.output_proj.weight.data)
nn.init.constant_(module.output_proj.bias.data, 0.0)
elif isinstance(module, Mask2FormerMaskedAttentionDecoderLayer):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain=xavier_std)
module.cross_attn.in_proj_bias.data.zero_()
elif isinstance(module, Mask2FormerPixelDecoder):
nn.init.normal_(module.level_embed, std=0)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if hasattr(module, 'reference_points'):
nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
nn.init.constant_(module.reference_points.bias.data, 0.0)
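A standalone sketch of the sampling-offset bias initialization in `_init_weights` above (the `Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention` branch): each attention head starts with offsets pointing in a different direction, and later sampling points reach further out. The sizes below are arbitrary examples.
```python
import math
import torch

n_heads, n_levels, n_points = 8, 3, 4
thetas = torch.arange(n_heads, dtype=torch.float) * (2.0 * math.pi / n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)         # (n_heads, 2) unit directions
grid_init = grid_init / grid_init.abs().max(-1, keepdim=True)[0]  # rescale onto the unit square
grid_init = grid_init.view(n_heads, 1, 1, 2).repeat(1, n_levels, n_points, 1)
for i in range(n_points):
    grid_init[:, :, i, :] *= i + 1                                 # point i+1 samples further from the reference
print(grid_init.view(-1).shape)  # matches the bias of shape (n_heads * n_levels * n_points * 2,)
```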
|
@auto_docstring
class Mask2FormerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
| 3
| 0
| 68
| 10
| 58
| 0
| 25
| 0
| 1
| 7
| 6
| 2
| 1
| 0
| 1
| 1
| 73
| 11
| 62
| 13
| 60
| 0
| 51
| 13
| 49
| 25
| 1
| 4
| 25
|
3,649
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerPredictionBlock
|
from torch import Tensor, nn
class Mask2FormerPredictionBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
super().__init__()
self.layers = [nn.Linear(in_dim, out_dim), activation]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
|
class Mask2FormerPredictionBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 2
| 0.09
| 1
| 5
| 0
| 0
| 2
| 1
| 2
| 12
| 13
| 1
| 11
| 7
| 8
| 1
| 11
| 7
| 8
| 2
| 1
| 1
| 4
|
3,650
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerSinePositionEmbedding
|
from typing import Optional, Union
from torch import Tensor, nn
import torch
import math
from ...pytorch_utils import compile_compatible_method_lru_cache
class Mask2FormerSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats: int=64, temperature: int=10000, normalize: bool=False, scale: Optional[float]=None):
super().__init__()
if scale is not None and normalize is False:
raise ValueError('normalize should be True if scale is passed')
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
@compile_compatible_method_lru_cache(maxsize=1)
def forward(self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor]=None) -> Tensor:
if mask is None:
mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
not_mask = (~mask).to(dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-06
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
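A brief self-contained sketch of the sinusoidal interleaving in `forward` above, reduced to a single normalized coordinate and a tiny embedding size; the values are illustrative only.
```python
import torch

num_pos_feats, temperature = 4, 10000
x_embed = torch.tensor([0.25])                                 # one normalized position
dim_t = torch.arange(num_pos_feats, dtype=torch.float)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
pos_x = x_embed[:, None] / dim_t                               # (1, num_pos_feats)
pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1)
print(pos_x)  # alternating sin/cos features at increasing wavelengths
```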
|
class Mask2FormerSinePositionEmbedding(nn.Module):
'''
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
'''
def __init__(self, num_pos_feats: int=64, temperature: int=10000, normalize: bool=False, scale: Optional[float]=None):
pass
@compile_compatible_method_lru_cache(maxsize=1)
def forward(self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor]=None) -> Tensor:
pass
| 4
| 1
| 15
| 1
| 14
| 0
| 3
| 0.14
| 1
| 6
| 0
| 0
| 2
| 4
| 2
| 12
| 37
| 4
| 29
| 17
| 24
| 4
| 27
| 15
| 24
| 3
| 1
| 1
| 6
|
3,651
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mask2former/modeling_mask2former.py
|
transformers.models.mask2former.modeling_mask2former.Mask2FormerTransformerModule
|
from torch import Tensor, nn
from .configuration_mask2former import Mask2FormerConfig
class Mask2FormerTransformerModule(nn.Module):
"""
The Mask2Former's transformer module.
"""
def __init__(self, in_features: int, config: Mask2FormerConfig):
super().__init__()
hidden_dim = config.hidden_dim
self.num_feature_levels = 3
self.position_embedder = Mask2FormerSinePositionEmbedding(num_pos_feats=hidden_dim // 2, normalize=True)
self.queries_embedder = nn.Embedding(config.num_queries, hidden_dim)
self.queries_features = nn.Embedding(config.num_queries, hidden_dim)
self.input_projections = []
for _ in range(self.num_feature_levels):
if in_features != hidden_dim or config.enforce_input_projection:
self.input_projections.append(nn.Conv2d(in_features, hidden_dim, kernel_size=1))
else:
self.input_projections.append(nn.Sequential())
self.decoder = Mask2FormerMaskedAttentionDecoder(config=config)
self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
def forward(self, multi_scale_features: list[Tensor], mask_features: Tensor, output_hidden_states: bool=False, output_attentions: bool=False) -> Mask2FormerMaskedAttentionDecoderOutput:
multi_stage_features = []
multi_stage_positional_embeddings = []
size_list = []
for i in range(self.num_feature_levels):
size_list.append(multi_scale_features[i].shape[-2:])
multi_stage_positional_embeddings.append(self.position_embedder(multi_scale_features[i].shape, multi_scale_features[i].device, multi_scale_features[i].dtype, None).flatten(2))
multi_stage_features.append(self.input_projections[i](multi_scale_features[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
multi_stage_positional_embeddings[-1] = multi_stage_positional_embeddings[-1].permute(2, 0, 1)
multi_stage_features[-1] = multi_stage_features[-1].permute(2, 0, 1)
_, batch_size, _ = multi_stage_features[0].shape
query_embeddings = self.queries_embedder.weight.unsqueeze(1).repeat(1, batch_size, 1)
query_features = self.queries_features.weight.unsqueeze(1).repeat(1, batch_size, 1)
decoder_output = self.decoder(inputs_embeds=query_features, multi_stage_positional_embeddings=multi_stage_positional_embeddings, pixel_embeddings=mask_features, encoder_hidden_states=multi_stage_features, query_position_embeddings=query_embeddings, feature_size_list=size_list, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True)
return decoder_output
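An illustrative sketch (dummy shapes) of the per-level flattening performed in `forward` above: each `(batch, channels, height, width)` map is flattened over space, a learned per-level embedding is broadcast-added, and the result is permuted to `(sequence_length, batch, channels)` for the decoder.
```python
import torch
from torch import nn

batch, channels, height, width = 2, 8, 4, 4
feature_map = torch.randn(batch, channels, height, width)
level_embed = nn.Embedding(3, channels)  # one learned embedding per feature level

flattened = feature_map.flatten(2) + level_embed.weight[0][None, :, None]  # (batch, channels, h*w)
flattened = flattened.permute(2, 0, 1)                                      # (h*w, batch, channels)
print(flattened.shape)  # torch.Size([16, 2, 8])
```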
|
class Mask2FormerTransformerModule(nn.Module):
'''
The Mask2Former's transformer module.
'''
def __init__(self, in_features: int, config: Mask2FormerConfig):
pass
def forward(self, multi_scale_features: list[Tensor], mask_features: Tensor, output_hidden_states: bool=False, output_attentions: bool=False) -> Mask2FormerMaskedAttentionDecoderOutput:
pass
| 3
| 1
| 30
| 4
| 25
| 1
| 3
| 0.1
| 1
| 9
| 4
| 0
| 2
| 7
| 2
| 12
| 65
| 10
| 50
| 26
| 41
| 5
| 30
| 20
| 27
| 3
| 1
| 2
| 5
|
3,652
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/configuration_maskformer.py
|
transformers.models.maskformer.configuration_maskformer.MaskFormerConfig
|
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..detr import DetrConfig
from ...configuration_utils import PretrainedConfig
from typing import Optional
from ..auto import CONFIG_MAPPING
from ..swin import SwinConfig
class MaskFormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MaskFormerModel`]. It is used to instantiate a
MaskFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MaskFormer
[facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) architecture trained
on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Currently, MaskFormer only supports the [Swin Transformer](swin) as backbone.
Args:
mask_feature_size (`int`, *optional*, defaults to 256):
The masks' features size; this value will also be used to specify the Feature Pyramid Network features'
size.
no_object_weight (`float`, *optional*, defaults to 0.1):
Weight to apply to the null (no object) class.
use_auxiliary_loss (`bool`, *optional*, defaults to `False`):
If `True` [`MaskFormerForInstanceSegmentationOutput`] will contain the auxiliary losses computed using the
logits from each decoder's stage.
backbone_config (`Dict`, *optional*):
The configuration passed to the backbone. If unset, the configuration corresponding to
`swin-base-patch4-window12-384` will be used.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
decoder_config (`Dict`, *optional*):
The configuration passed to the transformer decoder model. If unset, the base config for `detr-resnet-50`
will be used.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
dice_weight (`float`, *optional*, defaults to 1.0):
The weight for the dice loss.
cross_entropy_weight (`float`, *optional*, defaults to 1.0):
The weight for the cross entropy loss.
mask_weight (`float`, *optional*, defaults to 20.0):
The weight for the mask loss.
output_auxiliary_logits (`bool`, *optional*):
Should the model output its `auxiliary_logits` or not.
Raises:
`ValueError`:
Raised if the backbone model type selected is not in `["swin"]` or the decoder model type selected is not
in `["detr"]`
Examples:
```python
>>> from transformers import MaskFormerConfig, MaskFormerModel
>>> # Initializing a MaskFormer facebook/maskformer-swin-base-ade configuration
>>> configuration = MaskFormerConfig()
>>> # Initializing a model (with random weights) from the facebook/maskformer-swin-base-ade style configuration
>>> model = MaskFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'maskformer'
attribute_map = {'hidden_size': 'mask_feature_size'}
backbones_supported = ['resnet', 'swin']
decoders_supported = ['detr']
def __init__(self, fpn_feature_size: int=256, mask_feature_size: int=256, no_object_weight: float=0.1, use_auxiliary_loss: bool=False, backbone_config: Optional[dict]=None, decoder_config: Optional[dict]=None, init_std: float=0.02, init_xavier_std: float=1.0, dice_weight: float=1.0, cross_entropy_weight: float=1.0, mask_weight: float=20.0, output_auxiliary_logits: Optional[bool]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, **kwargs):
if backbone_config is None and backbone is None:
backbone_config = SwinConfig(image_size=384, num_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.pop('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
if backbone_config is not None and backbone_config.model_type not in self.backbones_supported:
logger.warning_once(f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. Supported model types: {','.join(self.backbones_supported)}")
if decoder_config is None:
decoder_config = DetrConfig()
else:
decoder_type = decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
if decoder_type not in self.decoders_supported:
raise ValueError(f"Transformer Decoder {decoder_type} not supported, please use one of {','.join(self.decoders_supported)}")
if isinstance(decoder_config, dict):
config_class = CONFIG_MAPPING[decoder_type]
decoder_config = config_class.from_dict(decoder_config)
self.backbone_config = backbone_config
self.decoder_config = decoder_config
self.fpn_feature_size = fpn_feature_size
self.mask_feature_size = mask_feature_size
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.cross_entropy_weight = cross_entropy_weight
self.dice_weight = dice_weight
self.mask_weight = mask_weight
self.use_auxiliary_loss = use_auxiliary_loss
self.no_object_weight = no_object_weight
self.output_auxiliary_logits = output_auxiliary_logits
self.num_attention_heads = self.decoder_config.encoder_attention_heads
self.num_hidden_layers = self.decoder_config.num_hidden_layers
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
super().__init__(**kwargs)
@property
def sub_configs(self):
sub_configs = {}
if self.backbone_config is not None and self.backbone_config != {}:
sub_configs['backbone_config'] = type(self.backbone_config)
if self.decoder_config is not None and self.decoder_config != {}:
sub_configs['decoder_config'] = type(self.decoder_config)
return sub_configs
@classmethod
def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
"""Instantiate a [`MaskFormerConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model
configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
decoder_config ([`PretrainedConfig`]):
The transformer decoder configuration to use.
Returns:
[`MaskFormerConfig`]: An instance of a configuration object
"""
return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)
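A short usage sketch for `from_backbone_and_decoder_configs` above; `SwinConfig` and `DetrConfig` are the backbone and decoder configurations already used by this module.
```python
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
decoder_config = DetrConfig()
config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone_config, decoder_config)
print(config.backbone_config.model_type, config.decoder_config.model_type)  # swin detr
```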
|
class MaskFormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MaskFormerModel`]. It is used to instantiate a
MaskFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MaskFormer
[facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) architecture trained
on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Currently, MaskFormer only supports the [Swin Transformer](swin) as backbone.
Args:
mask_feature_size (`int`, *optional*, defaults to 256):
The masks' features size; this value will also be used to specify the Feature Pyramid Network features'
size.
no_object_weight (`float`, *optional*, defaults to 0.1):
Weight to apply to the null (no object) class.
use_auxiliary_loss (`bool`, *optional*, defaults to `False`):
If `True` [`MaskFormerForInstanceSegmentationOutput`] will contain the auxiliary losses computed using the
logits from each decoder's stage.
backbone_config (`Dict`, *optional*):
The configuration passed to the backbone. If unset, the configuration corresponding to
`swin-base-patch4-window12-384` will be used.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
decoder_config (`Dict`, *optional*):
The configuration passed to the transformer decoder model. If unset, the base config for `detr-resnet-50`
will be used.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
dice_weight (`float`, *optional*, defaults to 1.0):
The weight for the dice loss.
cross_entropy_weight (`float`, *optional*, defaults to 1.0):
The weight for the cross entropy loss.
mask_weight (`float`, *optional*, defaults to 20.0):
The weight for the mask loss.
output_auxiliary_logits (`bool`, *optional*):
Should the model output its `auxiliary_logits` or not.
Raises:
`ValueError`:
Raised if the backbone model type selected is not in `["swin"]` or the decoder model type selected is not
in `["detr"]`
Examples:
```python
>>> from transformers import MaskFormerConfig, MaskFormerModel
>>> # Initializing a MaskFormer facebook/maskformer-swin-base-ade configuration
>>> configuration = MaskFormerConfig()
>>> # Initializing a model (with random weights) from the facebook/maskformer-swin-base-ade style configuration
>>> model = MaskFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, fpn_feature_size: int=256, mask_feature_size: int=256, no_object_weight: float=0.1, use_auxiliary_loss: bool=False, backbone_config: Optional[dict]=None, decoder_config: Optional[dict]=None, init_std: float=0.02, init_xavier_std: float=1.0, dice_weight: float=1.0, cross_entropy_weight: float=1.0, mask_weight: float=20.0, output_auxiliary_logits: Optional[bool]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, **kwargs):
pass
@property
def sub_configs(self):
pass
@classmethod
def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
'''Instantiate a [`MaskFormerConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model
configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
decoder_config ([`PretrainedConfig`]):
The transformer decoder configuration to use.
Returns:
[`MaskFormerConfig`]: An instance of a configuration object
'''
pass
| 6
| 2
| 56
| 3
| 45
| 9
| 5
| 0.84
| 1
| 9
| 2
| 0
| 1
| 18
| 2
| 2
| 194
| 19
| 95
| 50
| 70
| 80
| 43
| 28
| 40
| 8
| 1
| 2
| 9
|
3,653
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/configuration_maskformer_swin.py
|
transformers.models.maskformer.configuration_maskformer_swin.MaskFormerSwinConfig
|
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
from ...configuration_utils import PretrainedConfig
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MaskFormerSwinModel`]. It is used to instantiate
a MaskFormer Swin backbone model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Swin
[microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list[int]`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list[int]`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import MaskFormerSwinConfig, MaskFormerSwinModel
>>> # Initializing a microsoft/swin-tiny-patch4-window7-224 style configuration
>>> configuration = MaskFormerSwinConfig()
>>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration
>>> model = MaskFormerSwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'maskformer-swin'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
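A brief usage sketch showing how `out_features` interacts with the `stage_names` computed in `__init__` above.
```python
from transformers import MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage1', 'stage2', 'stage3', 'stage4']
```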
|
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MaskFormerSwinModel`]. It is used to instantiate
a MaskFormer Swin backbone model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Swin
[microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list[int]`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list[int]`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import MaskFormerSwinConfig, MaskFormerSwinModel
>>> # Initializing a microsoft/swin-tiny-patch4-window7-224 style configuration
>>> configuration = MaskFormerSwinConfig()
>>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration
>>> model = MaskFormerSwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, out_features=None, out_indices=None, **kwargs):
pass
| 2
| 1
| 48
| 1
| 45
| 2
| 1
| 1.25
| 2
| 3
| 0
| 0
| 1
| 21
| 1
| 6
| 126
| 11
| 51
| 45
| 28
| 64
| 25
| 24
| 23
| 1
| 1
| 0
| 1
|
3,654
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
|
transformers.models.maskformer.convert_maskformer_original_pytorch_checkpoint_to_pytorch.Args
|
from dataclasses import dataclass
@dataclass
class Args:
"""Fake command line arguments needed by maskformer/detectron implementation"""
config_file: str
|
@dataclass
class Args:
'''Fake command line arguments needed by maskformer/detectron implementation'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 2
| 1
| 1
| 1
| 2
| 1
| 1
| 0
| 0
| 0
| 0
|
3,655
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
|
transformers.models.maskformer.convert_maskformer_original_pytorch_checkpoint_to_pytorch.OriginalMaskFormerCheckpointToOursConverter
|
from collections.abc import Iterator
from transformers.models.maskformer.modeling_maskformer import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerForInstanceSegmentationOutput, MaskFormerModel, MaskFormerModelOutput
from pprint import pformat
from pathlib import Path
from torch import Tensor, nn
class OriginalMaskFormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: MaskFormerConfig):
self.original_model = original_model
self.config = config
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
for src_key, dst_key in renamed_keys:
dst_state_dict[dst_key] = src_state_dict.pop(src_key)
def replace_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: MaskFormerConfig):
dst_prefix: str = 'pixel_level_module.encoder'
src_prefix: str = 'backbone'
renamed_keys = [(f'{src_prefix}.patch_embed.proj.weight', f'{dst_prefix}.model.embeddings.patch_embeddings.projection.weight'), (f'{src_prefix}.patch_embed.proj.bias', f'{dst_prefix}.model.embeddings.patch_embeddings.projection.bias'), (f'{src_prefix}.patch_embed.norm.weight', f'{dst_prefix}.model.embeddings.norm.weight'), (f'{src_prefix}.patch_embed.norm.bias', f'{dst_prefix}.model.embeddings.norm.bias')]
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table')])
src_att_weight = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight']
src_att_bias = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias']
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight'] = src_att_weight[:offset, :]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias'] = src_att_bias[:offset]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight'] = src_att_weight[offset:offset * 2, :]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias'] = src_att_bias[offset:offset * 2]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight'] = src_att_weight[-offset:, :]
dst_state_dict[f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias'] = src_att_bias[-offset:]
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight')
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias')
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index', f'{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index')])
if layer_idx < num_layers - 1:
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.downsample.reduction.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.weight', f'{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.bias', f'{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias')])
renamed_keys.extend([(f'{src_prefix}.norm{layer_idx}.weight', f'{dst_prefix}.hidden_states_norms.{layer_idx}.weight'), (f'{src_prefix}.norm{layer_idx}.bias', f'{dst_prefix}.hidden_states_norms.{layer_idx}.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'pixel_level_module.decoder'
src_prefix: str = 'sem_seg_head.pixel_decoder'
self.replace_backbone(dst_state_dict, src_state_dict, self.config)
def rename_keys_for_conv(detectron_conv: str, mine_conv: str):
return [(f'{detectron_conv}.weight', f'{mine_conv}.0.weight'), (f'{detectron_conv}.norm.weight', f'{mine_conv}.1.weight'), (f'{detectron_conv}.norm.bias', f'{mine_conv}.1.bias')]
renamed_keys = [(f'{src_prefix}.mask_features.weight', f'{dst_prefix}.mask_projection.weight'), (f'{src_prefix}.mask_features.bias', f'{dst_prefix}.mask_projection.bias')]
renamed_keys.extend(rename_keys_for_conv(f'{src_prefix}.layer_4', f'{dst_prefix}.fpn.stem'))
for src_i, dst_i in zip(range(3, 0, -1), range(0, 3)):
renamed_keys.extend(rename_keys_for_conv(f'{src_prefix}.adapter_{src_i}', f'{dst_prefix}.fpn.layers.{dst_i}.proj'))
renamed_keys.extend(rename_keys_for_conv(f'{src_prefix}.layer_{src_i}', f'{dst_prefix}.fpn.layers.{dst_i}.block'))
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder'
src_prefix: str = 'sem_seg_head.predictor.transformer.decoder'
rename_keys = []
for i in range(self.config.decoder_config.decoder_layers):
rename_keys.append((f'{src_prefix}.layers.{i}.self_attn.out_proj.weight', f'{dst_prefix}.layers.{i}.self_attn.out_proj.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.self_attn.out_proj.bias', f'{dst_prefix}.layers.{i}.self_attn.out_proj.bias'))
rename_keys.append((f'{src_prefix}.layers.{i}.multihead_attn.out_proj.weight', f'{dst_prefix}.layers.{i}.encoder_attn.out_proj.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.multihead_attn.out_proj.bias', f'{dst_prefix}.layers.{i}.encoder_attn.out_proj.bias'))
rename_keys.append((f'{src_prefix}.layers.{i}.linear1.weight', f'{dst_prefix}.layers.{i}.fc1.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.linear1.bias', f'{dst_prefix}.layers.{i}.fc1.bias'))
rename_keys.append((f'{src_prefix}.layers.{i}.linear2.weight', f'{dst_prefix}.layers.{i}.fc2.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.linear2.bias', f'{dst_prefix}.layers.{i}.fc2.bias'))
rename_keys.append((f'{src_prefix}.layers.{i}.norm1.weight', f'{dst_prefix}.layers.{i}.self_attn_layer_norm.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.norm1.bias', f'{dst_prefix}.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'{src_prefix}.layers.{i}.norm2.weight', f'{dst_prefix}.layers.{i}.encoder_attn_layer_norm.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.norm2.bias', f'{dst_prefix}.layers.{i}.encoder_attn_layer_norm.bias'))
rename_keys.append((f'{src_prefix}.layers.{i}.norm3.weight', f'{dst_prefix}.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'{src_prefix}.layers.{i}.norm3.bias', f'{dst_prefix}.layers.{i}.final_layer_norm.bias'))
return rename_keys
def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder'
src_prefix: str = 'sem_seg_head.predictor.transformer.decoder'
for i in range(self.config.decoder_config.decoder_layers):
in_proj_weight = src_state_dict.pop(f'{src_prefix}.layers.{i}.self_attn.in_proj_weight')
in_proj_bias = src_state_dict.pop(f'{src_prefix}.layers.{i}.self_attn.in_proj_bias')
dst_state_dict[f'{dst_prefix}.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
dst_state_dict[f'{dst_prefix}.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
dst_state_dict[f'{dst_prefix}.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
dst_state_dict[f'{dst_prefix}.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
dst_state_dict[f'{dst_prefix}.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
dst_state_dict[f'{dst_prefix}.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
in_proj_weight_cross_attn = src_state_dict.pop(f'{src_prefix}.layers.{i}.multihead_attn.in_proj_weight')
in_proj_bias_cross_attn = src_state_dict.pop(f'{src_prefix}.layers.{i}.multihead_attn.in_proj_bias')
dst_state_dict[f'{dst_prefix}.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
dst_state_dict[f'{dst_prefix}.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
dst_state_dict[f'{dst_prefix}.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
dst_state_dict[f'{dst_prefix}.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
dst_state_dict[f'{dst_prefix}.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
dst_state_dict[f'{dst_prefix}.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder'
src_prefix: str = 'sem_seg_head.predictor.transformer.decoder'
renamed_keys = self.rename_keys_in_detr_decoder(dst_state_dict, src_state_dict)
renamed_keys.extend([(f'{src_prefix}.norm.weight', f'{dst_prefix}.layernorm.weight'), (f'{src_prefix}.norm.bias', f'{dst_prefix}.layernorm.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
self.replace_q_k_v_in_detr_decoder(dst_state_dict, src_state_dict)
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module'
src_prefix: str = 'sem_seg_head.predictor'
self.replace_detr_decoder(dst_state_dict, src_state_dict)
renamed_keys = [(f'{src_prefix}.query_embed.weight', f'{dst_prefix}.queries_embedder.weight'), (f'{src_prefix}.input_proj.weight', f'{dst_prefix}.input_projection.weight'), (f'{src_prefix}.input_proj.bias', f'{dst_prefix}.input_projection.bias')]
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = ''
src_prefix: str = 'sem_seg_head.predictor'
renamed_keys = [(f'{src_prefix}.class_embed.weight', f'{dst_prefix}class_predictor.weight'), (f'{src_prefix}.class_embed.bias', f'{dst_prefix}class_predictor.bias')]
mlp_len = 3
for i in range(mlp_len):
renamed_keys.extend([(f'{src_prefix}.mask_embed.layers.{i}.weight', f'{dst_prefix}mask_embedder.{i}.0.weight'), (f'{src_prefix}.mask_embed.layers.{i}.bias', f'{dst_prefix}mask_embedder.{i}.0.bias')])
logger.info(f'Replacing keys {pformat(renamed_keys)}')
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel:
dst_state_dict = TrackedStateDict(mask_former.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_pixel_module(dst_state_dict, src_state_dict)
self.replace_transformer_module(dst_state_dict, src_state_dict)
logger.info(f'Missed keys are {pformat(dst_state_dict.diff())}')
logger.info(f'Not copied keys are {pformat(src_state_dict.keys())}')
logger.info('🙌 Done')
mask_former.load_state_dict(dst_state_dict)
return mask_former
def convert_instance_segmentation(self, mask_former: MaskFormerForInstanceSegmentation) -> MaskFormerForInstanceSegmentation:
dst_state_dict = TrackedStateDict(mask_former.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_instance_segmentation_module(dst_state_dict, src_state_dict)
mask_former.load_state_dict(dst_state_dict)
return mask_former
@staticmethod
def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[Path, Path]]:
checkpoints: list[Path] = list(checkpoints_dir.glob('**/*.pkl'))
for checkpoint in checkpoints:
logger.info(f'💪 Converting {checkpoint.stem}')
config: Path = config_dir / checkpoint.parents[0].stem / 'swin' / f'{checkpoint.stem}.yaml'
yield (config, checkpoint)
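A hedged usage sketch for the converter above, not part of the original file. It assumes `original_model` (the detectron2 MaskFormer) and `config` (a `MaskFormerConfig`) have already been built elsewhere in the script; only the state-dict conversion steps are shown.
# Hedged sketch: drive the weight conversion for one checkpoint; `original_model` and `config` are assumed to exist.
converter = OriginalMaskFormerCheckpointToOursConverter(original_model, config)

mask_former = MaskFormerModel(config=config).eval()
mask_former = converter.convert(mask_former)  # copies and renames backbone / pixel-decoder / transformer weights

mask_former_for_instance_segmentation = MaskFormerForInstanceSegmentation(config=config).eval()
mask_former_for_instance_segmentation.model = mask_former
mask_former_for_instance_segmentation = converter.convert_instance_segmentation(mask_former_for_instance_segmentation)  # adds the class/mask heads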
|
class OriginalMaskFormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: MaskFormerConfig):
pass
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: MaskFormerConfig):
pass
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def rename_keys_for_conv(detectron_conv: str, mine_conv: str):
pass
def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel:
pass
def convert_instance_segmentation(self, mask_former: MaskFormerForInstanceSegmentation) -> MaskFormerForInstanceSegmentation:
pass
@staticmethod
def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[Path, Path]]:
pass
| 15
| 0
| 28
| 3
| 24
| 2
| 2
| 0.07
| 0
| 9
| 1
| 0
| 11
| 2
| 12
| 12
| 371
| 45
| 305
| 63
| 288
| 22
| 131
| 60
| 117
| 4
| 0
| 2
| 22
|
3,656
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
|
transformers.models.maskformer.convert_maskformer_original_pytorch_checkpoint_to_pytorch.OriginalMaskFormerConfigToImageProcessorConverter
|
from detectron2.data import MetadataCatalog
from transformers.models.maskformer.feature_extraction_maskformer import MaskFormerImageProcessor
import torch
class OriginalMaskFormerConfigToImageProcessorConverter:
def __call__(self, original_config: object) -> MaskFormerImageProcessor:
model = original_config.MODEL
model_input = original_config.INPUT
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
return MaskFormerImageProcessor(image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=dataset_catalog.ignore_label, size_divisibility=32)
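A hedged usage sketch, not part of the original file: `original_config` stands for an already-loaded detectron2 config node and is only assumed here.
# Hedged sketch: build an image processor directly from a detectron2 config.
image_processor = OriginalMaskFormerConfigToImageProcessorConverter()(original_config)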
|
class OriginalMaskFormerConfigToImageProcessorConverter:
def __call__(self, original_config: object) -> MaskFormerImageProcessor:
pass
| 2
| 0
| 14
| 1
| 13
| 1
| 1
| 0.07
| 0
| 2
| 0
| 0
| 1
| 0
| 1
| 1
| 15
| 1
| 14
| 5
| 12
| 1
| 6
| 5
| 4
| 1
| 0
| 0
| 1
|
3,657
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
|
transformers.models.maskformer.convert_maskformer_original_pytorch_checkpoint_to_pytorch.OriginalMaskFormerConfigToOursConverter
|
from transformers.models.maskformer.modeling_maskformer import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerForInstanceSegmentationOutput, MaskFormerModel, MaskFormerModelOutput
from detectron2.data import MetadataCatalog
class OriginalMaskFormerConfigToOursConverter:
def __call__(self, original_config: object) -> MaskFormerConfig:
model = original_config.MODEL
mask_former = model.MASK_FORMER
swin = model.SWIN
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
id2label = dict(enumerate(dataset_catalog.stuff_classes))
label2id = {label: idx for idx, label in id2label.items()}
config: MaskFormerConfig = MaskFormerConfig(fpn_feature_size=model.SEM_SEG_HEAD.CONVS_DIM, mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, no_object_weight=mask_former.NO_OBJECT_WEIGHT, num_queries=mask_former.NUM_OBJECT_QUERIES, backbone_config={'pretrain_img_size': swin.PRETRAIN_IMG_SIZE, 'image_size': swin.PRETRAIN_IMG_SIZE, 'in_channels': 3, 'patch_size': swin.PATCH_SIZE, 'embed_dim': swin.EMBED_DIM, 'depths': swin.DEPTHS, 'num_heads': swin.NUM_HEADS, 'window_size': swin.WINDOW_SIZE, 'drop_path_rate': swin.DROP_PATH_RATE, 'model_type': 'swin'}, dice_weight=mask_former.DICE_WEIGHT, ce_weight=1.0, mask_weight=mask_former.MASK_WEIGHT, decoder_config={'model_type': 'detr', 'max_position_embeddings': 1024, 'encoder_layers': 6, 'encoder_ffn_dim': 2048, 'encoder_attention_heads': 8, 'decoder_layers': mask_former.DEC_LAYERS, 'decoder_ffn_dim': mask_former.DIM_FEEDFORWARD, 'decoder_attention_heads': mask_former.NHEADS, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'd_model': mask_former.HIDDEN_DIM, 'dropout': mask_former.DROPOUT, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'init_std': 0.02, 'init_xavier_std': 1.0, 'scale_embedding': False, 'auxiliary_loss': False, 'dilation': False}, id2label=id2label, label2id=label2id)
return config
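A hedged usage sketch, not part of the original file; as above, `original_config` is an already-loaded detectron2 config and is only assumed.
# Hedged sketch: derive a MaskFormerConfig from the detectron2 config and inspect a few fields.
config = OriginalMaskFormerConfigToOursConverter()(original_config)
print(config.num_queries, config.backbone_config.depths)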
|
class OriginalMaskFormerConfigToOursConverter:
def __call__(self, original_config: object) -> MaskFormerConfig:
pass
| 2
| 0
| 57
| 3
| 53
| 1
| 1
| 0.02
| 0
| 4
| 0
| 0
| 1
| 0
| 1
| 1
| 58
| 3
| 54
| 9
| 52
| 1
| 10
| 9
| 8
| 1
| 0
| 0
| 1
|
3,658
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/feature_extraction_maskformer.py
|
transformers.models.maskformer.feature_extraction_maskformer.MaskFormerFeatureExtractor
|
import warnings
from ...utils.import_utils import requires
from .image_processing_maskformer import MaskFormerImageProcessor
@requires(backends=('vision',))
class MaskFormerFeatureExtractor(MaskFormerImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class MaskFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use MaskFormerImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
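A short, hedged sketch (not part of the original file) showing that instantiating the deprecated class still works but emits a `FutureWarning`; `MaskFormerImageProcessor` is the preferred replacement.
import warnings

# Hedged sketch: the deprecated alias behaves like MaskFormerImageProcessor, but warns on construction.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    feature_extractor = MaskFormerFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)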
|
@requires(backends=('vision',))
class MaskFormerFeatureExtractor(MaskFormerImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 39
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
3,659
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/image_processing_maskformer.py
|
transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor
|
from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_transforms import PaddingMode, get_resize_output_image_size, pad, rescale, resize, to_channel_dimension_format
from ...utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging
import numpy as np
from typing import TYPE_CHECKING, Any, Optional, Union
import warnings
from ...utils.import_utils import requires
from collections.abc import Iterable
@requires(backends=('vision',))
class MaskFormerImageProcessor(BaseImageProcessor):
"""
Constructs a MaskFormer image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e., if `height > width`, then the image will be rescaled to `(size *
height / width, size)`.
size_divisor (`int`, *optional*, defaults to 32):
Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in
Swin Transformer.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1/ 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
model_input_names = ['pixel_values', 'pixel_mask']
@filter_out_non_signature_kwargs(extra=['max_size', *INIT_SERVICE_KWARGS])
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, num_labels: Optional[int]=None, pad_size: Optional[dict[str, int]]=None, **kwargs):
super().__init__(**kwargs)
self._max_size = kwargs.pop('max_size', 1333)
size = size if size is not None else {'shortest_edge': 800, 'longest_edge': self._max_size}
size = get_size_dict(size, max_size=self._max_size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.size_divisor = size_divisor
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.ignore_index = ignore_index
self.do_reduce_labels = do_reduce_labels
self.num_labels = num_labels
self.pad_size = pad_size
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
"""
image_processor_dict = super().to_dict()
image_processor_dict.pop('_max_size', None)
return image_processor_dict
def resize(self, image: np.ndarray, size: dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
The size of the output image.
size_divisor (`int`, *optional*, defaults to 0):
If `size_divisor` is given, the output image size will be divisible by the number.
resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
max_size = kwargs.pop('max_size', None)
size = get_size_dict(size, max_size=max_size, default_to_square=False)
if 'shortest_edge' in size and 'longest_edge' in size:
size, max_size = (size['shortest_edge'], size['longest_edge'])
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
max_size = None
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.")
size = get_maskformer_resize_output_image_size(image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, input_data_format=input_data_format)
image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
return image
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
return convert_segmentation_map_to_binary_masks(segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)
def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature:
return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_resize:
image = self.resize(image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: int=0, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, input_data_format=input_data_format)
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False, max_size=self._max_size)
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
pad_size = self.pad_size if pad_size is None else pad_size
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if segmentation_maps is not None and (not valid_images(segmentation_maps)):
raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if segmentation_maps is not None and len(images) != len(segmentation_maps):
raise ValueError('Images and segmentation maps must have the same length.')
images = [self._preprocess_image(image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for image in images]
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_mask(segmentation_map, do_resize, size, size_divisor, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
encoded_inputs = self.encode_inputs(images, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, input_data_format=data_format, pad_size=pad_size)
return encoded_inputs
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
pad_size = pad_size if pad_size is not None else self.pad_size
if pad_size is not None:
padded_size = (pad_size['height'], pad_size['width'])
else:
padded_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [self._pad_image(image, padded_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) for image in images]
data = {'pixel_values': padded_images}
if return_pixel_mask:
masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]
data['pixel_mask'] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
def encode_inputs(self, pixel_values_list: list[ImageInput], segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None):
"""
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
MaskFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
Images in a batch are padded up to the largest image in the batch (or to `pad_size`) and a
corresponding pixel mask is created that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` is `class_labels[i][j]`.
"""
ignore_index = self.ignore_index if ignore_index is None else ignore_index
do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels
pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(pixel_values_list[0])
encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format, pad_size=pad_size)
if segmentation_maps is not None:
mask_labels = []
class_labels = []
pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format)
for idx, segmentation_map in enumerate(segmentation_maps):
segmentation_map = to_numpy_array(segmentation_map)
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
masks, classes = self.convert_segmentation_map_to_binary_masks(segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)
if masks.shape[0] > 0:
masks = [mask[None, ...] for mask in masks]
masks = [self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index, input_data_format=ChannelDimension.FIRST) for mask in masks]
masks = np.concatenate(masks, axis=0)
else:
masks = np.zeros((0, *pad_size), dtype=np.float32)
mask_labels.append(torch.from_numpy(masks))
class_labels.append(torch.from_numpy(classes))
encoded_inputs['mask_labels'] = mask_labels
encoded_inputs['class_labels'] = class_labels
return encoded_inputs
def post_process_segmentation(self, outputs: 'MaskFormerForInstanceSegmentationOutput', target_size: Optional[tuple[int, int]]=None) -> 'torch.Tensor':
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only
supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
target_size (`tuple[int, int]`, *optional*):
If set, the `masks_queries_logits` will be resized to `target_size`.
Returns:
`torch.Tensor`:
A tensor of shape (`batch_size, num_class_labels, height, width`).
"""
warnings.warn('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_instance_segmentation`', FutureWarning)
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
if target_size is not None:
masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=target_size, mode='bilinear', align_corners=False)
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)
return segmentation
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None) -> 'torch.Tensor':
"""
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False, return_binary_maps: Optional[bool]=False) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only
supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
to `True` to get the correct segmentation result.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
return_binary_maps (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
(one per detected instance).
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
Set to `None` if no mask is found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if return_coco_annotation and return_binary_maps:
raise ValueError('return_coco_annotation and return_binary_maps can not be both set to True.')
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
device = masks_queries_logits.device
num_classes = class_queries_logits.shape[-1] - 1
num_queries = class_queries_logits.shape[-2]
results: list[dict[str, TensorType]] = []
for i in range(class_queries_logits.shape[0]):
mask_pred = masks_queries_logits[i]
mask_cls = class_queries_logits[i]
scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = torch.div(topk_indices, num_classes, rounding_mode='floor')
mask_pred = mask_pred[topk_indices]
pred_masks = (mask_pred > 0).float()
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (pred_masks.flatten(1).sum(1) + 1e-06)
pred_scores = scores_per_image * mask_scores_per_image
pred_classes = labels_per_image
segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1
if target_sizes is not None:
segmentation = torch.zeros(target_sizes[i]) - 1
pred_masks = torch.nn.functional.interpolate(pred_masks.unsqueeze(0), size=target_sizes[i], mode='nearest')[0]
instance_maps, segments = ([], [])
current_segment_id = 0
for j in range(num_queries):
score = pred_scores[j].item()
if not torch.all(pred_masks[j] == 0) and score >= threshold:
segmentation[pred_masks[j] == 1] = current_segment_id
segments.append({'id': current_segment_id, 'label_id': pred_classes[j].item(), 'was_fused': False, 'score': round(score, 6)})
current_segment_id += 1
instance_maps.append(pred_masks[j])
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
if return_binary_maps and len(instance_maps) != 0:
segmentation = torch.stack(instance_maps, dim=0)
results.append({'segmentation': segmentation, 'segments_info': segments})
return results
def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this set will have all their instances fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning('`label_ids_to_fuse` unset. No instance will be fused.')
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid()
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels)
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({'segmentation': segmentation, 'segments_info': []})
continue
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size)
results.append({'segmentation': segmentation, 'segments_info': segments})
return results
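A hedged end-to-end sketch, not part of the original file, tying together the main entry points above: `__call__`/`preprocess` with segmentation maps, then semantic and panoptic post-processing. The checkpoint name and the toy arrays are placeholders; PyTorch and a pretrained MaskFormer checkpoint are assumed to be available.
# Hedged sketch (placeholder checkpoint and toy data): preprocessing + post-processing round trip.
import numpy as np
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

checkpoint = 'facebook/maskformer-swin-base-ade'  # placeholder; any MaskFormer checkpoint should work
image_processor = MaskFormerImageProcessor.from_pretrained(checkpoint)
model = MaskFormerForInstanceSegmentation.from_pretrained(checkpoint).eval()

image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # toy RGB image
segmentation_map = np.random.randint(0, 10, (480, 640), dtype=np.uint8)  # toy per-pixel class ids

# __call__ delegates to preprocess; segmentation maps become mask_labels / class_labels.
inputs = image_processor(images=image, segmentation_maps=segmentation_map, return_tensors='pt')

with torch.no_grad():
    outputs = model(pixel_values=inputs['pixel_values'], pixel_mask=inputs['pixel_mask'])

# Per-pixel class ids at the requested resolution.
semantic_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])[0]

# Panoptic output: a segment-id map plus per-segment metadata.
panoptic = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(480, 640)])[0]
print(semantic_map.shape, len(panoptic['segments_info']))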
|
@requires(backends=('vision',))
class MaskFormerImageProcessor(BaseImageProcessor):
'''
Constructs a MaskFormer image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e., if `height > width`, then the image will be rescaled to `(size *
height / width, size)`.
size_divisor (`int`, *optional*, defaults to 32):
Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in
Swin Transformer.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1/ 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
'''
@filter_out_non_signature_kwargs(extra=['max_size', *INIT_SERVICE_KWARGS])
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, num_labels: Optional[int]=None, pad_size: Optional[dict[str, int]]=None, **kwargs):
pass
def to_dict(self) -> dict[str, Any]:
'''
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
'''
pass
def resize(self, image: np.ndarray, size: dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
The size of the output image.
size_divisor (`int`, *optional*, defaults to 0):
If `size_divisor` is given, the output image size will be divisible by the number.
resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
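# Illustrative sketch only (hypothetical helper): one common way to honour a
# `size_divisor` constraint is to round each output dimension up to the nearest
# multiple of the divisor, which is what this example assumes.
import math

def round_up_to_divisor(height: int, width: int, size_divisor: int) -> tuple[int, int]:
    if size_divisor <= 0:
        return height, width
    return (math.ceil(height / size_divisor) * size_divisor,
            math.ceil(width / size_divisor) * size_divisor)

# round_up_to_divisor(481, 641, 32) -> (512, 672); (480, 640) is already divisible by 32.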
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
'''
pass
def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):
pass
def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature:
pass
def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: int=0, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single mask.'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
pass
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image with zeros to the given size.
'''
pass
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None) -> BatchFeature:
'''
Pads a batch of images to the bottom and right with zeros to the size of the largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
images (`list[np.ndarray]`):
Images to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
'''
pass
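# Illustrative sketch only, assuming channels-first numpy arrays: pad an image at the
# bottom and right with zeros to `output_size` and build the matching pixel mask
# (1 for real pixels, 0 for padding), mirroring the behaviour described above.
import numpy as np

def pad_bottom_right(image: np.ndarray, output_size: tuple[int, int]) -> tuple[np.ndarray, np.ndarray]:
    _, height, width = image.shape
    target_height, target_width = output_size
    padded = np.pad(image, ((0, 0), (0, target_height - height), (0, target_width - width)),
                    mode="constant", constant_values=0)
    pixel_mask = np.zeros((target_height, target_width), dtype=np.int64)
    pixel_mask[:height, :width] = 1
    return padded, pixel_mask

padded, mask = pad_bottom_right(np.random.rand(3, 480, 600), (512, 640))
# padded.shape == (3, 512, 640); mask.sum() == 480 * 600 real pixels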
def encode_inputs(self, pixel_values_list: list[ImageInput], segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None):
'''
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
MaskFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
(`bool`, *optional*, defaults to `True`):
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
If left to the default, will return a pixel mask that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` is `class_labels[i][j]`.
'''
pass
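# Illustrative sketch only (hypothetical helper): the mask-classification view used by
# `encode_inputs`, where a semantic segmentation map becomes one binary mask per label
# present plus the list of labels, as in the `[[2, 6, 7, 9]]` example above.
import numpy as np

def segmentation_map_to_binary_masks(segmentation_map: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    class_labels = np.unique(segmentation_map)
    binary_masks = np.stack([segmentation_map == label for label in class_labels]).astype(np.float32)
    return binary_masks, class_labels

masks, labels = segmentation_map_to_binary_masks(np.array([[2, 6], [7, 9]]))
# masks.shape == (4, 2, 2) -> four binary masks; labels -> [2, 6, 7, 9]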
def post_process_segmentation(self, outputs: 'MaskFormerForInstanceSegmentationOutput', target_size: Optional[tuple[int, int]]=None) -> 'torch.Tensor':
'''
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only
supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
target_size (`tuple[int, int]`, *optional*):
If set, the `masks_queries_logits` will be resized to `target_size`.
Returns:
`torch.Tensor`:
A tensor of shape (`batch_size, num_class_labels, height, width`).
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None) -> 'torch.Tensor':
'''
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
'''
pass
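# Minimal sketch of the combination step behind semantic post-processing, under the
# usual MaskFormer formulation: per-query class probabilities (null class dropped) are
# weighted against per-query mask probabilities, summed over queries, and argmax'd over
# classes. Shapes are illustrative; this is not the exact library code.
import torch

batch_size, num_queries, num_labels, height, width = 1, 100, 150, 128, 128
class_queries_logits = torch.randn(batch_size, num_queries, num_labels + 1)
masks_queries_logits = torch.randn(batch_size, num_queries, height, width)

masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]  # drop the null class
masks_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum("bqc,bqhw->bchw", masks_classes, masks_probs)
semantic_map = segmentation.argmax(dim=1)  # (batch_size, height, width)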
def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False, return_binary_maps: Optional[bool]=False) -> list[dict]:
'''
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only
supports PyTorch. If instances could overlap, set either `return_coco_annotation` or `return_binary_maps`
to `True` to get the correct segmentation result.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
return_binary_maps (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
(one per detected instance).
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
Set to `None` if no mask is found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
'''
pass
def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]:
'''
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this set will have all their instances fused together. For instance, we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
'''
pass
| 21
| 13
| 47
| 4
| 30
| 14
| 4
| 0.53
| 1
| 15
| 3
| 1
| 17
| 13
| 18
| 38
| 921
| 95
| 544
| 242
| 383
| 288
| 244
| 104
| 225
| 17
| 3
| 3
| 77
|
3,660
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.DetrAttention
|
from typing import Optional, Union
import torch
from torch import Tensor, nn
class DetrAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the DETR paper).
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
return tensor if object_queries is None else tensor + object_queries
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, spatial_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
if object_queries is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, object_queries)
if spatial_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings)
query_states = self.q_proj(hidden_states) * self.scaling
if is_cross_attention:
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (batch_size, 1, target_len, source_len):
raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}')
if attention_mask.dtype == torch.bool:
attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(attention_mask, -torch.inf)
attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
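# Usage sketch for the module above: plain self-attention over a small batch of query
# embeddings, with position embeddings added to the queries and keys. Tensor sizes are
# illustrative only.
attention = DetrAttention(embed_dim=256, num_heads=8, dropout=0.0)
hidden_states = torch.randn(2, 100, 256)   # (batch, queries, embed_dim)
object_queries = torch.randn(2, 100, 256)  # position embeddings
attn_output, attn_weights = attention(hidden_states, object_queries=object_queries, output_attentions=True)
# attn_output: (2, 100, 256); attn_weights: (2, 8, 100, 100)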
|
class DetrAttention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the DETR paper).
'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, spatial_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 5
| 2
| 29
| 4
| 22
| 3
| 4
| 0.19
| 1
| 6
| 0
| 0
| 4
| 9
| 4
| 14
| 127
| 22
| 88
| 41
| 69
| 17
| 60
| 27
| 55
| 9
| 1
| 2
| 14
|
3,661
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.DetrDecoder
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ..detr import DetrConfig
import torch
from torch import Tensor, nn
class DetrDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetrDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some small tweaks for DETR:
- object_queries and query_position_embeddings are added to the forward pass.
- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
Args:
config: DetrConfig
"""
def __init__(self, config: DetrConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.layers = nn.ModuleList([DetrDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The query embeddings that are passed into the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
- 1 for queries that are **not masked**,
- 0 for queries that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each cross-attention layer.
query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
input_shape = inputs_embeds.size()[:-1]
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
intermediate = () if self.config.auxiliary_loss else None
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, None, object_queries, query_position_embeddings, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if self.config.auxiliary_loss:
hidden_states = self.layernorm(hidden_states)
intermediate += (hidden_states,)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.config.auxiliary_loss:
intermediate = torch.stack(intermediate)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] if v is not None))
return DetrDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate)
|
class DetrDecoder(nn.Module):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetrDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some small tweaks for DETR:
- object_queries and query_position_embeddings are added to the forward pass.
- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
Args:
config: DetrConfig
'''
def __init__(self, config: DetrConfig):
pass
def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The query embeddings that are passed into the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
- 1 for queries that are **not masked**,
- 0 for queries that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each cross-attention layer.
query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 75
| 11
| 45
| 20
| 11
| 0.53
| 1
| 7
| 3
| 0
| 2
| 6
| 2
| 12
| 165
| 27
| 90
| 29
| 76
| 48
| 48
| 18
| 45
| 21
| 1
| 3
| 22
|
3,662
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.DetrDecoderLayer
|
from torch import Tensor, nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
from ..detr import DetrConfig
from ...activations import ACT2FN
class DetrDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: DetrConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = DetrAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = DetrAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
object_queries that are added to the hidden states
in the cross-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
position embeddings that are added to the queries and keys
in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, object_queries=query_position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, object_queries=query_position_embeddings, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, spatial_position_embeddings=object_queries, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
class DetrDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: DetrConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
object_queries that are added to the hidden states
in the cross-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
position embeddings that are added to the queries and keys
in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 51
| 6
| 33
| 12
| 2
| 0.36
| 1
| 5
| 2
| 0
| 2
| 11
| 2
| 12
| 103
| 12
| 67
| 27
| 55
| 24
| 38
| 18
| 35
| 3
| 1
| 1
| 4
|
3,663
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.DetrDecoderOutput
|
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
from dataclasses import dataclass
from ...modeling_outputs import BaseModelOutputWithCrossAttentions
import torch
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,\n namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\n gone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n ')
class DetrDecoderOutput(BaseModelOutputWithCrossAttentions):
"""
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,\n namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\n gone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n ')
class DetrDecoderOutput(BaseModelOutputWithCrossAttentions):
'''
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 11.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 2
| 2
| 2
| 1
| 23
| 2
| 2
| 1
| 0
| 2
| 0
| 0
|
3,664
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerFPNConvLayer
|
from torch import Tensor, nn
class MaskFormerFPNConvLayer(nn.Module):
def __init__(self, in_features: int, out_features: int, kernel_size: int=3, padding: int=1):
"""
A basic module that executes conv - norm - activation in sequence, as used in MaskFormer.
Args:
in_features (`int`):
The number of input features (channels).
out_features (`int`):
The number of output features (channels).
"""
super().__init__()
self.layers = [nn.Conv2d(in_features, out_features, kernel_size=kernel_size, padding=padding, bias=False), nn.GroupNorm(32, out_features), nn.ReLU(inplace=True)]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
|
class MaskFormerFPNConvLayer(nn.Module):
def __init__(self, in_features: int, out_features: int, kernel_size: int=3, padding: int=1):
'''
A basic module that executes conv - norm - activation in sequence, as used in MaskFormer.
Args:
in_features (`int`):
The number of input features (channels).
out_features (`int`):
The number of output features (channels).
'''
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3
| 1
| 15
| 1
| 7
| 7
| 2
| 0.93
| 1
| 5
| 0
| 0
| 2
| 1
| 2
| 12
| 31
| 2
| 15
| 7
| 12
| 14
| 11
| 7
| 8
| 2
| 1
| 1
| 4
|
3,665
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerFPNLayer
|
from torch import Tensor, nn
class MaskFormerFPNLayer(nn.Module):
def __init__(self, in_features: int, lateral_features: int):
"""
A Feature Pyramid Network (FPN) layer. It creates a feature map by aggregating features from the previous
and backbone layer. Due to the spatial mismatch, the tensor coming from the previous layer is upsampled.
Args:
in_features (`int`):
The number of input features (channels).
lateral_features (`int`):
The number of lateral features (channels).
"""
super().__init__()
self.proj = nn.Sequential(nn.Conv2d(lateral_features, in_features, kernel_size=1, padding=0, bias=False), nn.GroupNorm(32, in_features))
self.block = MaskFormerFPNConvLayer(in_features, in_features)
def forward(self, down: Tensor, left: Tensor) -> Tensor:
left = self.proj(left)
down = nn.functional.interpolate(down, size=left.shape[-2:], mode='nearest')
down += left
down = self.block(down)
return down
|
class MaskFormerFPNLayer(nn.Module):
def __init__(self, in_features: int, lateral_features: int):
'''
A Feature Pyramid Network (FPN) layer. It creates a feature map by aggregating features from the previous
and backbone layer. Due to the spatial mismatch, the tensor coming from the previous layer is upsampled.
Args:
in_features (`int`):
The number of input features (channels).
lateral_features (`int`):
The number of lateral features (channels).
'''
pass
def forward(self, down: Tensor, left: Tensor) -> Tensor:
pass
| 3
| 1
| 12
| 1
| 7
| 5
| 1
| 0.64
| 1
| 4
| 1
| 0
| 2
| 2
| 2
| 12
| 26
| 3
| 14
| 5
| 11
| 9
| 11
| 5
| 8
| 1
| 1
| 0
| 2
|
3,666
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerFPNModel
|
from torch import Tensor, nn
class MaskFormerFPNModel(nn.Module):
def __init__(self, in_features: int, lateral_widths: list[int], feature_size: int=256):
"""
Feature Pyramid Network: given an input tensor and a set of feature maps of different feature/spatial sizes, it
creates a list of feature maps with the same feature size.
Args:
in_features (`int`):
The number of input features (channels).
lateral_widths (`list[int]`):
A list with the features (channels) size of each lateral connection.
feature_size (`int`, *optional*, defaults to 256):
The features (channels) of the resulting feature maps.
"""
super().__init__()
self.stem = MaskFormerFPNConvLayer(in_features, feature_size)
self.layers = nn.Sequential(*[MaskFormerFPNLayer(feature_size, lateral_width) for lateral_width in lateral_widths[::-1]])
def forward(self, features: list[Tensor]) -> list[Tensor]:
fpn_features = []
last_feature = features[-1]
other_features = features[:-1]
output = self.stem(last_feature)
for layer, left in zip(self.layers, other_features[::-1]):
output = layer(output, left)
fpn_features.append(output)
return fpn_features
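# Usage sketch for the FPN above, assuming MaskFormerFPNConvLayer and MaskFormerFPNLayer
# from the same module are in scope. Feature maps are ordered from the highest-resolution
# stage to the lowest; the last one feeds the stem and the rest are lateral connections.
# Channel widths and spatial sizes below are illustrative only.
import torch

fpn = MaskFormerFPNModel(in_features=768, lateral_widths=[96, 192, 384], feature_size=256)
features = [
    torch.randn(1, 96, 56, 56),   # lateral, stage 1
    torch.randn(1, 192, 28, 28),  # lateral, stage 2
    torch.randn(1, 384, 14, 14),  # lateral, stage 3
    torch.randn(1, 768, 7, 7),    # last stage, fed to the stem
]
fpn_features = fpn(features)
# three maps, each with 256 channels, at 14x14, 28x28 and 56x56 resolution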
|
class MaskFormerFPNModel(nn.Module):
def __init__(self, in_features: int, lateral_widths: list[int], feature_size: int=256):
'''
Feature Pyramid Network: given an input tensor and a set of feature maps of different feature/spatial sizes, it
creates a list of feature maps with the same feature size.
Args:
in_features (`int`):
The number of input features (channels).
lateral_widths (`list[int]`):
A list with the features (channels) size of each lateral connection.
feature_size (`int`, *optional*, defaults to 256):
The features (channels) of the resulting feature maps.
'''
pass
def forward(self, features: list[Tensor]) -> list[Tensor]:
pass
| 3
| 1
| 14
| 1
| 8
| 6
| 2
| 0.69
| 1
| 6
| 2
| 0
| 2
| 2
| 2
| 12
| 29
| 2
| 16
| 10
| 13
| 11
| 14
| 10
| 11
| 2
| 1
| 1
| 3
|
3,667
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentation
|
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
from torch import Tensor, nn
import torch
from .configuration_maskformer import MaskFormerConfig
class MaskFormerForInstanceSegmentation(MaskFormerPreTrainedModel):
def __init__(self, config: MaskFormerConfig):
super().__init__(config)
self.model = MaskFormerModel(config)
hidden_size = config.decoder_config.hidden_size
self.class_predictor = nn.Linear(hidden_size, config.num_labels + 1)
self.mask_embedder = MaskformerMLPPredictionHead(hidden_size, hidden_size, config.mask_feature_size)
self.matcher = MaskFormerHungarianMatcher(cost_class=1.0, cost_dice=config.dice_weight, cost_mask=config.mask_weight)
self.weight_dict: dict[str, float] = {'loss_cross_entropy': config.cross_entropy_weight, 'loss_mask': config.mask_weight, 'loss_dice': config.dice_weight}
self.criterion = MaskFormerLoss(config.num_labels, matcher=self.matcher, weight_dict=self.weight_dict, eos_coef=config.no_object_weight)
self.post_init()
def get_loss_dict(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, auxiliary_logits: dict[str, Tensor]) -> dict[str, Tensor]:
loss_dict: dict[str, Tensor] = self.criterion(masks_queries_logits, class_queries_logits, mask_labels, class_labels, auxiliary_logits)
for key, weight in self.weight_dict.items():
for loss_key, loss in loss_dict.items():
if key in loss_key:
loss *= weight
return loss_dict
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
return sum(loss_dict.values())
def get_logits(self, outputs: MaskFormerModelOutput) -> tuple[Tensor, Tensor, dict[str, Tensor]]:
pixel_embeddings = outputs.pixel_decoder_last_hidden_state
auxiliary_logits: list[dict[str, Tensor]] = []
if self.config.use_auxiliary_loss:
stacked_transformer_decoder_outputs = torch.stack(outputs.transformer_decoder_hidden_states)
classes = self.class_predictor(stacked_transformer_decoder_outputs)
class_queries_logits = classes[-1]
mask_embeddings = self.mask_embedder(stacked_transformer_decoder_outputs)
binaries_masks = torch.einsum('lbqc, bchw -> lbqhw', mask_embeddings, pixel_embeddings)
masks_queries_logits = binaries_masks[-1]
for aux_binary_masks, aux_classes in zip(binaries_masks[:-1], classes[:-1]):
auxiliary_logits.append({'masks_queries_logits': aux_binary_masks, 'class_queries_logits': aux_classes})
else:
transformer_decoder_hidden_states = outputs.transformer_decoder_last_hidden_state
classes = self.class_predictor(transformer_decoder_hidden_states)
class_queries_logits = classes
mask_embeddings = self.mask_embedder(transformer_decoder_hidden_states)
masks_queries_logits = torch.einsum('bqc, bchw -> bqhw', mask_embeddings, pixel_embeddings)
return (class_queries_logits, masks_queries_logits, auxiliary_logits)
@auto_docstring
def forward(self, pixel_values: Tensor, mask_labels: Optional[list[Tensor]]=None, class_labels: Optional[list[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_auxiliary_logits: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> MaskFormerForInstanceSegmentationOutput:
"""
mask_labels (`list[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
output_auxiliary_logits (`bool`, *optional*):
Whether or not to output auxiliary logits.
Examples:
Semantic segmentation example:
```python
>>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on ADE20k semantic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
>>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to image_processor for postprocessing
>>> predicted_semantic_map = image_processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
>>> list(predicted_semantic_map.shape)
[512, 683]
```
Panoptic segmentation example:
```python
>>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on COCO panoptic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-coco")
>>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-coco")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to image_processor for postprocessing
>>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(image.height, image.width)])[0]
>>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
>>> predicted_panoptic_map = result["segmentation"]
>>> list(predicted_panoptic_map.shape)
[480, 640]
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
raw_outputs = self.model(pixel_values, pixel_mask, output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, return_dict=return_dict, output_attentions=output_attentions)
outputs = MaskFormerModelOutput(encoder_last_hidden_state=raw_outputs[0], pixel_decoder_last_hidden_state=raw_outputs[1], transformer_decoder_last_hidden_state=raw_outputs[2], encoder_hidden_states=raw_outputs[3] if output_hidden_states else None, pixel_decoder_hidden_states=raw_outputs[4] if output_hidden_states else None, transformer_decoder_hidden_states=raw_outputs[5] if output_hidden_states else None, hidden_states=raw_outputs[6] if output_hidden_states else None, attentions=raw_outputs[-1] if output_attentions else None)
loss, loss_dict, auxiliary_logits = (None, None, None)
class_queries_logits, masks_queries_logits, auxiliary_logits = self.get_logits(outputs)
if mask_labels is not None and class_labels is not None:
loss_dict: dict[str, Tensor] = self.get_loss_dict(masks_queries_logits, class_queries_logits, mask_labels, class_labels, auxiliary_logits)
loss = self.get_loss(loss_dict)
output_auxiliary_logits = self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
if not output_auxiliary_logits:
auxiliary_logits = None
if not return_dict:
output = tuple((v for v in (loss, class_queries_logits, masks_queries_logits, auxiliary_logits, *outputs.values()) if v is not None))
return output
return MaskFormerForInstanceSegmentationOutput(loss=loss, **outputs, class_queries_logits=class_queries_logits, masks_queries_logits=masks_queries_logits, auxiliary_logits=auxiliary_logits)
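# Minimal sketch of the mask-logit construction used in `get_logits` above: each query's
# mask embedding is combined with the per-pixel embeddings through an einsum, yielding one
# mask logit map per query. Shapes are illustrative only.
mask_embeddings = torch.randn(2, 100, 256)        # (batch, queries, mask_feature_size)
pixel_embeddings = torch.randn(2, 256, 128, 128)  # (batch, mask_feature_size, height, width)
masks_queries_logits = torch.einsum("bqc, bchw -> bqhw", mask_embeddings, pixel_embeddings)
# masks_queries_logits: (2, 100, 128, 128)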
|
class MaskFormerForInstanceSegmentation(MaskFormerPreTrainedModel):
def __init__(self, config: MaskFormerConfig):
pass
def get_loss_dict(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, auxiliary_logits: dict[str, Tensor]) -> dict[str, Tensor]:
pass
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
pass
def get_logits(self, outputs: MaskFormerModelOutput) -> tuple[Tensor, Tensor, dict[str, Tensor]]:
pass
@auto_docstring
def forward(self, pixel_values: Tensor, mask_labels: Optional[list[Tensor]]=None, class_labels: Optional[list[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_auxiliary_logits: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> MaskFormerForInstanceSegmentationOutput:
'''
mask_labels (`list[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
output_auxiliary_logits (`bool`, *optional*):
Whether or not to output auxiliary logits.
Examples:
Semantic segmentation example:
```python
>>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on ADE20k semantic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
>>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to image_processor for postprocessing
>>> predicted_semantic_map = image_processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
>>> list(predicted_semantic_map.shape)
[512, 683]
```
Panoptic segmentation example:
```python
>>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on COCO panoptic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-coco")
>>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-coco")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to image_processor for postprocessing
>>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(image.height, image.width)])[0]
>>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
>>> predicted_panoptic_map = result["segmentation"]
>>> list(predicted_panoptic_map.shape)
[480, 640]
```
'''
pass
| 7
| 1
| 49
| 7
| 28
| 14
| 5
| 0.5
| 1
| 16
| 7
| 0
| 5
| 6
| 5
| 6
| 250
| 39
| 141
| 53
| 116
| 70
| 69
| 35
| 63
| 13
| 2
| 3
| 26
|
3,668
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput
|
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
import torch
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`MaskFormerForInstanceSegmentation`].\n\n This output can be directly passed to [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or\n [`~MaskFormerImageProcessor.post_process_instance_segmentation`] or\n [`~MaskFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see\n [`~MaskFormerImageProcessor`] for details regarding usage.\n ')
class MaskFormerForInstanceSegmentationOutput(ModelOutput):
"""
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_logits (`Dict[str, torch.FloatTensor]`, *optional*, returned when `output_auxiliary_logits=True`):
Dictionary containing auxiliary predictions for each decoder layer when auxiliary losses are enabled.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).
transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Last hidden states (final feature map) of the last stage of the transformer decoder model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the transformer decoder at the output
of each stage.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
`decoder_hidden_states`.
"""
loss: Optional[torch.FloatTensor] = None
class_queries_logits: Optional[torch.FloatTensor] = None
masks_queries_logits: Optional[torch.FloatTensor] = None
auxiliary_logits: Optional[torch.FloatTensor] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
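# --- Illustrative sketch (not part of the original module) ---
# How the two logits tensors in this output are commonly reduced to per-pixel
# class scores for semantic segmentation; a minimal sketch on random tensors,
# not the image processor's exact implementation.
import torch

batch_size, num_queries, num_labels, height, width = 1, 100, 150, 4, 4
class_queries_logits = torch.rand(batch_size, num_queries, num_labels + 1)
masks_queries_logits = torch.rand(batch_size, num_queries, height, width)
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]  # drop the null class
masks_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum("bqc,bqhw->bchw", masks_classes, masks_probs)
print(segmentation.shape)  # torch.Size([1, 150, 4, 4])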
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`MaskFormerForInstanceSegmentation`].\n\n This output can be directly passed to [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or\n [`~MaskFormerImageProcessor.post_process_instance_segmentation`] or\n [`~MaskFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see\n [`~MaskFormerImageProcessor`] for details regarding usage.\n ')
class MaskFormerForInstanceSegmentationOutput(ModelOutput):
'''
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_logits (`Dict[str, torch.FloatTensor]`, *optional*, returned when `output_auxiliary_logits=True`):
Dictionary containing auxiliary predictions for each decoder layer when auxiliary losses are enabled.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).
transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Last hidden states (final feature map) of the last stage of the transformer decoder model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the transformer decoder at the output
of each stage.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
`decoder_hidden_states`.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.15
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 3
| 13
| 13
| 12
| 41
| 13
| 13
| 12
| 0
| 1
| 0
| 0
|
3,669
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerHungarianMatcher
|
from torch import Tensor, nn
import numpy as np
import torch
class MaskFormerHungarianMatcher(nn.Module):
"""This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float=1.0, cost_mask: float=1.0, cost_dice: float=1.0):
"""Creates the matcher
Params:
cost_class (float, *optional*, defaults to 1.0):
This is the relative weight of the classification error in the matching cost.
cost_mask (float, *optional*, defaults to 1.0):
This is the relative weight of the focal loss of the binary mask in the matching cost.
cost_dice (float, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost
"""
super().__init__()
if cost_class == 0 and cost_mask == 0 and (cost_dice == 0):
raise ValueError("All costs can't be 0")
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
@torch.no_grad()
def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> list[tuple[Tensor]]:
"""Performs the matching
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the
predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the
classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number
of ground-truth objects in the target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target
masks.
Returns:
`list[tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes).
"""
indices: list[tuple[np.array]] = []
preds_masks = masks_queries_logits
preds_probs = class_queries_logits
for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
target_mask = nn.functional.interpolate(target_mask[:, None], size=pred_mask.shape[-2:], mode='nearest')
pred_probs = pred_probs.softmax(-1)
cost_class = -pred_probs[:, labels]
pred_mask_flat = pred_mask.flatten(1)
target_mask_flat = target_mask[:, 0].flatten(1)
cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat)
cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat)
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
assigned_indices: tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
indices.append(assigned_indices)
matched_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
return matched_indices
def __repr__(self):
head = 'Matcher ' + self.__class__.__name__
body = [f'cost_class: {self.cost_class}', f'cost_mask: {self.cost_mask}', f'cost_dice: {self.cost_dice}']
_repr_indent = 4
lines = [head] + [' ' * _repr_indent + line for line in body]
return '\n'.join(lines)
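# --- Illustrative sketch (not part of the original module) ---
# Minimal demonstration of the Hungarian matching step performed above: build
# a query-vs-target cost matrix and solve the 1-to-1 assignment with SciPy.
# The cost values are random and for illustration only.
import torch
from scipy.optimize import linear_sum_assignment

num_queries, num_targets = 5, 3
cost_matrix = torch.rand(num_queries, num_targets)  # lower cost = better match
pred_indices, target_indices = linear_sum_assignment(cost_matrix.numpy())
# Each matched prediction index is paired with exactly one target index.
print(list(zip(pred_indices.tolist(), target_indices.tolist())))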
|
class MaskFormerHungarianMatcher(nn.Module):
'''This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
'''
def __init__(self, cost_class: float=1.0, cost_mask: float=1.0, cost_dice: float=1.0):
'''Creates the matcher
Params:
cost_class (float, *optional*, defaults to 1.0):
This is the relative weight of the classification error in the matching cost.
cost_mask (float, *optional*, defaults to 1.0):
This is the relative weight of the focal loss of the binary mask in the matching cost.
cost_dice (float, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost
'''
pass
@torch.no_grad()
def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> list[tuple[Tensor]]:
'''Performs the matching
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the
predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the
classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number
of ground-truth objects in the target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target
masks.
Returns:
`list[tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes).
'''
pass
def __repr__(self):
pass
| 5
| 3
| 28
| 2
| 12
| 15
| 2
| 1.29
| 1
| 5
| 0
| 0
| 3
| 3
| 3
| 13
| 95
| 10
| 38
| 24
| 33
| 49
| 31
| 23
| 27
| 2
| 1
| 1
| 5
|
3,670
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerLoss
|
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
from numbers import Number
from torch import Tensor, nn
import torch
import numpy as np
class MaskFormerLoss(nn.Module):
def __init__(self, num_labels: int, matcher: MaskFormerHungarianMatcher, weight_dict: dict[str, float], eos_coef: float):
"""
The MaskFormer loss. The loss is computed very similarly to DETR, in two steps: 1) we compute the Hungarian
assignment between the ground truth masks and the outputs of the model, and 2) we supervise each pair of
matched ground-truth / prediction (supervise class and mask)
Args:
num_labels (`int`):
The number of classes.
matcher (`MaskFormerHungarianMatcher`):
A torch module that computes the assignments between the predictions and labels.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
eos_coef (`float`):
Weight to apply to the null class.
"""
super().__init__()
requires_backends(self, ['scipy'])
self.num_labels = num_labels
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
empty_weight = torch.ones(self.num_labels + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def _max_by_axis(self, the_list: list[list[int]]) -> list[int]:
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors])
batch_size = len(tensors)
batch_shape = [batch_size] + max_size
b, _, h, w = batch_shape
dtype = tensors[0].dtype
device = tensors[0].device
padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device)
padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device)
for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks):
padded_tensor[:tensor.shape[0], :tensor.shape[1], :tensor.shape[2]].copy_(tensor)
padding_mask[:tensor.shape[1], :tensor.shape[2]] = False
return (padded_tensors, padding_masks)
def loss_labels(self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]) -> dict[str, Tensor]:
"""Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
"""
pred_logits = class_queries_logits
batch_size, num_queries, _ = pred_logits.shape
criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
idx = self._get_predictions_permutation_indices(indices)
target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
target_classes = torch.full((batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device)
target_classes[idx] = target_classes_o
pred_logits_transposed = pred_logits.transpose(1, 2)
loss_ce = criterion(pred_logits_transposed, target_classes)
losses = {'loss_cross_entropy': loss_ce}
return losses
def loss_masks(self, masks_queries_logits: Tensor, mask_labels: list[Tensor], indices: tuple[np.array], num_masks: int) -> dict[str, Tensor]:
"""Compute the losses related to the masks using focal and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
num_masks (`int`):
The number of masks, used for normalization.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
"""
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
pred_masks = masks_queries_logits[src_idx]
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
pred_masks = nn.functional.interpolate(pred_masks[:, None], size=target_masks.shape[-2:], mode='bilinear', align_corners=False)
pred_masks = pred_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
losses = {'loss_mask': sigmoid_focal_loss(pred_masks, target_masks, num_masks), 'loss_dice': dice_loss(pred_masks, target_masks, num_masks)}
return losses
def _get_predictions_permutation_indices(self, indices):
batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
predictions_indices = torch.cat([src for src, _ in indices])
return (batch_indices, predictions_indices)
def _get_targets_permutation_indices(self, indices):
batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
target_indices = torch.cat([tgt for _, tgt in indices])
return (batch_indices, target_indices)
def forward(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: list[Tensor], class_labels: list[Tensor], auxiliary_predictions: Optional[dict[str, Tensor]]=None) -> dict[str, Tensor]:
"""
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
If `use_auxiliary_loss` was set to `True` in [`MaskFormerConfig`], contains the logits from the
inner layers of the DETR decoder.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
If `use_auxiliary_loss` was set to `True` in [`MaskFormerConfig`], the dictionary contains additional
losses for each auxiliary prediction.
"""
indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
num_masks: Number = self.get_num_masks(class_labels, device=class_labels[0].device)
losses: dict[str, Tensor] = {**self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), **self.loss_labels(class_queries_logits, class_labels, indices)}
if auxiliary_predictions is not None:
for idx, aux_outputs in enumerate(auxiliary_predictions):
masks_queries_logits = aux_outputs['masks_queries_logits']
class_queries_logits = aux_outputs['class_queries_logits']
loss_dict = self.forward(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
loss_dict = {f'{key}_{idx}': value for key, value in loss_dict.items()}
losses.update(loss_dict)
return losses
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
"""
Computes the average number of target masks across the batch, for normalization purposes.
"""
num_masks = sum([len(classes) for classes in class_labels])
num_masks = torch.as_tensor(num_masks, dtype=torch.float, device=device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_masks = reduce(num_masks)
world_size = PartialState().num_processes
num_masks = torch.clamp(num_masks / world_size, min=1)
return num_masks
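# --- Illustrative sketch (not part of the original module) ---
# How a weight_dict like the one passed to MaskFormerLoss is typically used
# downstream to combine the individual loss terms into one scalar. The keys
# match the losses returned above; the weights are assumed example values.
import torch

losses = {
    "loss_cross_entropy": torch.tensor(0.7),
    "loss_mask": torch.tensor(1.2),
    "loss_dice": torch.tensor(0.9),
}
weight_dict = {"loss_cross_entropy": 1.0, "loss_mask": 20.0, "loss_dice": 1.0}
total_loss = sum(weight_dict[key] * value for key, value in losses.items() if key in weight_dict)
print(float(total_loss))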
|
class MaskFormerLoss(nn.Module):
def __init__(self, num_labels: int, matcher: MaskFormerHungarianMatcher, weight_dict: dict[str, float], eos_coef: float):
'''
The MaskFormer loss. The loss is computed very similarly to DETR, in two steps: 1) we compute the Hungarian
assignment between the ground truth masks and the outputs of the model, and 2) we supervise each pair of
matched ground-truth / prediction (supervise class and mask)
Args:
num_labels (`int`):
The number of classes.
matcher (`MaskFormerHungarianMatcher`):
A torch module that computes the assignments between the predictions and labels.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
eos_coef (`float`):
Weight to apply to the null class.
'''
pass
def _max_by_axis(self, the_list: list[list[int]]) -> list[int]:
pass
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
pass
def loss_labels(self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]) -> dict[str, Tensor]:
'''Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
'''
pass
def loss_masks(self, masks_queries_logits: Tensor, mask_labels: list[Tensor], indices: tuple[np.array], num_masks: int) -> dict[str, Tensor]:
'''Compute the losses related to the masks using focal and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array]`):
The indices computed by the Hungarian matcher.
num_masks (`int`):
The number of masks, used for normalization.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
'''
pass
def _get_predictions_permutation_indices(self, indices):
pass
def _get_targets_permutation_indices(self, indices):
pass
def forward(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: list[Tensor], class_labels: list[Tensor], auxiliary_predictions: Optional[dict[str, Tensor]]=None) -> dict[str, Tensor]:
'''
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
If `use_auxiliary_loss` was set to `True` in [`MaskFormerConfig`], contains the logits from the
inner layers of the DETR decoder.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
If `use_auxiliary_loss` was set to `True` in [`MaskFormerConfig`], the dictionary contains additional
losses for each auxiliary prediction.
'''
pass
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
'''
Computes the average number of target masks across the batch, for normalization purposes.
'''
pass
| 10
| 5
| 23
| 2
| 12
| 9
| 2
| 0.77
| 1
| 12
| 1
| 0
| 9
| 4
| 9
| 19
| 217
| 22
| 110
| 69
| 83
| 85
| 83
| 52
| 73
| 3
| 1
| 2
| 16
|
3,671
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerModel
|
from .configuration_maskformer import MaskFormerConfig
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
from torch import Tensor, nn
import torch
@auto_docstring
class MaskFormerModel(MaskFormerPreTrainedModel):
def __init__(self, config: MaskFormerConfig):
super().__init__(config)
self.pixel_level_module = MaskFormerPixelLevelModule(config)
self.transformer_module = MaskFormerTransformerModule(in_features=self.pixel_level_module.encoder.channels[-1], config=config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Tensor, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> MaskFormerModelOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, MaskFormerModel
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on ADE20k semantic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
>>> model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-base-ade")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the decoder of MaskFormer outputs hidden states of shape (batch_size, num_queries, hidden_size)
>>> transformer_decoder_last_hidden_state = outputs.transformer_decoder_last_hidden_state
>>> list(transformer_decoder_last_hidden_state.shape)
[1, 100, 256]
```"""
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, _, height, width = pixel_values.shape
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states, return_dict=return_dict)
image_features = pixel_level_module_output[0]
pixel_embeddings = pixel_level_module_output[1]
transformer_module_output = self.transformer_module(image_features, output_hidden_states, output_attentions)
queries = transformer_module_output.last_hidden_state
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
hidden_states = None
if output_hidden_states:
encoder_hidden_states = pixel_level_module_output[2]
pixel_decoder_hidden_states = pixel_level_module_output[3]
transformer_decoder_hidden_states = transformer_module_output[1]
hidden_states = encoder_hidden_states + pixel_decoder_hidden_states + transformer_decoder_hidden_states
output = MaskFormerModelOutput(encoder_last_hidden_state=image_features, pixel_decoder_last_hidden_state=pixel_embeddings, transformer_decoder_last_hidden_state=queries, encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, hidden_states=hidden_states, attentions=transformer_module_output.attentions)
if not return_dict:
output = tuple((v for v in output.values()))
return output
|
@auto_docstring
class MaskFormerModel(MaskFormerPreTrainedModel):
def __init__(self, config: MaskFormerConfig):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> MaskFormerModelOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, MaskFormerModel
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on ADE20k semantic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
>>> model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-base-ade")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the decoder of MaskFormer outputs hidden states of shape (batch_size, num_queries, hidden_size)
>>> transformer_decoder_last_hidden_state = outputs.transformer_decoder_last_hidden_state
>>> list(transformer_decoder_last_hidden_state.shape)
[1, 100, 256]
```'''
pass
| 5
| 1
| 47
| 10
| 27
| 10
| 5
| 0.35
| 1
| 9
| 4
| 0
| 2
| 2
| 2
| 3
| 97
| 20
| 57
| 24
| 45
| 20
| 33
| 16
| 30
| 8
| 2
| 1
| 9
|
3,672
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput
|
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
import torch
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`MaskFormerModel`]. This class returns all the needed hidden states to compute the logits.\n ')
class MaskFormerModelOutput(ModelOutput):
"""
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).
transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Last hidden states (final feature map) of the last stage of the transformer decoder model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
`decoder_hidden_states`
"""
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`MaskFormerModel`]. This class returns all the needed hidden states to compute the logits.\n ')
class MaskFormerModelOutput(ModelOutput):
'''
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).
transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Last hidden states (final feature map) of the last stage of the transformer decoder model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
`decoder_hidden_states`
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.22
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 2
| 9
| 9
| 8
| 29
| 9
| 9
| 8
| 0
| 1
| 0
| 0
|
3,673
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerPixelDecoder
|
from torch import Tensor, nn
class MaskFormerPixelDecoder(nn.Module):
def __init__(self, *args, feature_size: int=256, mask_feature_size: int=256, **kwargs):
"""
Pixel Decoder Module proposed in [Per-Pixel Classification is Not All You Need for Semantic
Segmentation](https://huggingface.co/papers/2107.06278). It first runs the backbone's features through a Feature
Pyramid Network, creating a list of feature maps. Then, it projects the last one to the correct `mask_size`.
Args:
feature_size (`int`, *optional*, defaults to 256):
The feature size (channel dimension) of the FPN feature maps.
mask_feature_size (`int`, *optional*, defaults to 256):
The features (channels) of the target masks size \\\\(C_{\\epsilon}\\\\) in the paper.
"""
super().__init__()
self.fpn = MaskFormerFPNModel(*args, feature_size=feature_size, **kwargs)
self.mask_projection = nn.Conv2d(feature_size, mask_feature_size, kernel_size=3, padding=1)
def forward(self, features: list[Tensor], output_hidden_states: bool=False, return_dict: bool=True) -> MaskFormerPixelDecoderOutput:
fpn_features = self.fpn(features)
last_feature_projected = self.mask_projection(fpn_features[-1])
if not return_dict:
return (last_feature_projected, tuple(fpn_features)) if output_hidden_states else (last_feature_projected,)
return MaskFormerPixelDecoderOutput(last_hidden_state=last_feature_projected, hidden_states=tuple(fpn_features) if output_hidden_states else ())
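# --- Illustrative sketch (not part of the original module) ---
# A minimal feature-pyramid step of the kind the pixel decoder performs:
# project a backbone feature map laterally, upsample the coarser map, sum,
# then project the result to the mask feature size. Channel sizes are
# assumptions for illustration only.
import torch
from torch import nn

feature_size, mask_feature_size = 256, 256
coarse = torch.rand(1, feature_size, 8, 8)  # coarser pyramid level
backbone_feature = torch.rand(1, 512, 16, 16)  # higher-resolution backbone feature
lateral = nn.Conv2d(512, feature_size, kernel_size=1)(backbone_feature)
merged = lateral + nn.functional.interpolate(coarse, size=lateral.shape[-2:], mode="nearest")
mask_features = nn.Conv2d(feature_size, mask_feature_size, kernel_size=3, padding=1)(merged)
print(mask_features.shape)  # torch.Size([1, 256, 16, 16])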
|
class MaskFormerPixelDecoder(nn.Module):
def __init__(self, *args, feature_size: int=256, mask_feature_size: int=256, **kwargs):
'''
Pixel Decoder Module proposed in [Per-Pixel Classification is Not All You Need for Semantic
Segmentation](https://huggingface.co/papers/2107.06278). It first runs the backbone's features through a Feature
Pyramid Network, creating a list of feature maps. Then, it projects the last one to the correct `mask_size`.
Args:
feature_size (`int`, *optional*, defaults to 256):
The feature size (channel dimension) of the FPN feature maps.
mask_feature_size (`int`, *optional*, defaults to 256):
The features (channels) of the target masks size \\(C_{\epsilon}\\) in the paper.
'''
pass
def forward(self, features: list[Tensor], output_hidden_states: bool=False, return_dict: bool=True) -> MaskFormerPixelDecoderOutput:
pass
| 3
| 1
| 15
| 2
| 7
| 6
| 3
| 0.73
| 1
| 7
| 2
| 0
| 2
| 2
| 2
| 12
| 31
| 5
| 15
| 9
| 10
| 11
| 11
| 7
| 8
| 4
| 1
| 1
| 5
|
3,674
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerPixelDecoderOutput
|
from dataclasses import dataclass
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
@dataclass
@auto_docstring(custom_intro="\n MaskFormer's pixel decoder module output, practically a Feature Pyramid Network. It returns the last hidden state\n and (optionally) the hidden states.\n ")
class MaskFormerPixelDecoderOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the model.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n MaskFormer's pixel decoder module output, practically a Feature Pyramid Network. It returns the last hidden state\n and (optionally) the hidden states.\n ")
class MaskFormerPixelDecoderOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the model.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 2
| 4
| 4
| 3
| 15
| 4
| 4
| 3
| 0
| 1
| 0
| 0
|
3,675
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerPixelLevelModule
|
from .configuration_maskformer_swin import MaskFormerSwinConfig
from ...utils.backbone_utils import load_backbone
from .configuration_maskformer import MaskFormerConfig
from torch import Tensor, nn
class MaskFormerPixelLevelModule(nn.Module):
def __init__(self, config: MaskFormerConfig):
"""
Pixel Level Module proposed in [Per-Pixel Classification is Not All You Need for Semantic
Segmentation](https://huggingface.co/papers/2107.06278). It runs the input image through a backbone and a pixel
decoder, generating an image feature map and pixel embeddings.
Args:
config ([`MaskFormerConfig`]):
The configuration used to instantiate this model.
"""
super().__init__()
if getattr(config, 'backbone_config') is not None and config.backbone_config.model_type == 'swin':
backbone_config = config.backbone_config
backbone_config = MaskFormerSwinConfig.from_dict(backbone_config.to_dict())
backbone_config.out_features = ['stage1', 'stage2', 'stage3', 'stage4']
config.backbone_config = backbone_config
self.encoder = load_backbone(config)
feature_channels = self.encoder.channels
self.decoder = MaskFormerPixelDecoder(in_features=feature_channels[-1], feature_size=config.fpn_feature_size, mask_feature_size=config.mask_feature_size, lateral_widths=feature_channels[:-1])
def forward(self, pixel_values: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> MaskFormerPixelLevelModuleOutput:
features = self.encoder(pixel_values).feature_maps
decoder_output = self.decoder(features, output_hidden_states, return_dict=return_dict)
if not return_dict:
last_hidden_state = decoder_output[0]
outputs = (features[-1], last_hidden_state)
if output_hidden_states:
hidden_states = decoder_output[1]
outputs = outputs + (tuple(features),) + (hidden_states,)
return outputs
return MaskFormerPixelLevelModuleOutput(encoder_last_hidden_state=features[-1], decoder_last_hidden_state=decoder_output.last_hidden_state, encoder_hidden_states=tuple(features) if output_hidden_states else (), decoder_hidden_states=decoder_output.hidden_states if output_hidden_states else ())
|
class MaskFormerPixelLevelModule(nn.Module):
def __init__(self, config: MaskFormerConfig):
'''
Pixel Level Module proposed in [Per-Pixel Classification is Not All You Need for Semantic
Segmentation](https://huggingface.co/papers/2107.06278). It runs the input image through a backbone and a pixel
decoder, generating an image feature map and pixel embeddings.
Args:
config ([`MaskFormerConfig`]):
The configuration used to instantiate this model.
'''
pass
def forward(self, pixel_values: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> MaskFormerPixelLevelModuleOutput:
pass
| 3
| 1
| 24
| 2
| 17
| 5
| 4
| 0.29
| 1
| 8
| 4
| 0
| 2
| 2
| 2
| 12
| 49
| 5
| 34
| 14
| 29
| 10
| 22
| 12
| 19
| 5
| 1
| 2
| 7
|
3,676
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerPixelLevelModuleOutput
|
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from typing import Optional, Union
import torch
@dataclass
@auto_docstring(custom_intro="\n MaskFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the\n `encoder` and `decoder`. By default, the `encoder` is a MaskFormerSwin Transformer and the `decoder` is a Feature\n Pyramid Network (FPN).\n\n The `encoder_last_hidden_state` are referred on the paper as **images features**, while `decoder_last_hidden_state`\n as **pixel embeddings**\n ")
class MaskFormerPixelLevelModuleOutput(ModelOutput):
"""
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder.
decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the decoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
the output of each stage.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
the output of each stage.
"""
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n MaskFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the\n `encoder` and `decoder`. By default, the `encoder` is a MaskFormerSwin Transformer and the `decoder` is a Feature\n Pyramid Network (FPN).\n\n The `encoder_last_hidden_state` are referred on the paper as **images features**, while `decoder_last_hidden_state`\n as **pixel embeddings**\n ")
class MaskFormerPixelLevelModuleOutput(ModelOutput):
'''
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder.
decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the decoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
the output of each stage.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
the output of each stage.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 3
| 5
| 5
| 4
| 20
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
3,677
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerPreTrainedModel
|
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from torch import Tensor, nn
from ...modeling_utils import PreTrainedModel
from .configuration_maskformer import MaskFormerConfig
@auto_docstring
class MaskFormerPreTrainedModel(PreTrainedModel):
config: MaskFormerConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
def _init_weights(self, module: nn.Module):
xavier_std = self.config.init_xavier_std
std = self.config.init_std
if isinstance(module, MaskFormerTransformerModule):
if module.input_projection is not None:
nn.init.xavier_uniform_(module.input_projection.weight, gain=xavier_std)
nn.init.constant_(module.input_projection.bias, 0)
elif isinstance(module, MaskFormerFPNModel):
nn.init.xavier_uniform_(module.stem.get_submodule('0').weight, gain=xavier_std)
elif isinstance(module, MaskFormerFPNLayer):
nn.init.xavier_uniform_(module.proj[0].weight, gain=xavier_std)
elif isinstance(module, MaskFormerFPNConvLayer):
nn.init.xavier_uniform_(module.get_submodule('0').weight, gain=xavier_std)
elif isinstance(module, MaskformerMLPPredictionHead):
for submodule in module.modules():
if isinstance(submodule, nn.Linear):
nn.init.xavier_uniform_(submodule.weight, gain=xavier_std)
nn.init.constant_(submodule.bias, 0)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
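# --- Illustrative sketch (not part of the original module) ---
# The Xavier-uniform initialization with a custom gain used above, applied to
# a standalone 1x1 conv; the gain value is an assumed example, not the
# library default.
import torch
from torch import nn

init_xavier_std = 1.0
proj = nn.Conv2d(1024, 256, kernel_size=1)
nn.init.xavier_uniform_(proj.weight, gain=init_xavier_std)
nn.init.constant_(proj.bias, 0)
print(float(proj.weight.std()))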
|
@auto_docstring
class MaskFormerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
| 3
| 0
| 38
| 2
| 29
| 7
| 14
| 0.21
| 1
| 5
| 5
| 2
| 1
| 0
| 1
| 1
| 43
| 3
| 33
| 8
| 31
| 7
| 27
| 8
| 25
| 14
| 1
| 3
| 14
|
3,678
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerSinePositionEmbedding
|
from typing import Optional, Union
from ...pytorch_utils import compile_compatible_method_lru_cache
from torch import Tensor, nn
import torch
import math
class MaskFormerSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats: int=64, temperature: int=10000, normalize: bool=False, scale: Optional[float]=None):
super().__init__()
if scale is not None and normalize is False:
raise ValueError('normalize should be True if scale is passed')
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
@compile_compatible_method_lru_cache(maxsize=1)
def forward(self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor]=None) -> Tensor:
if mask is None:
mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
not_mask = (~mask).to(dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-06
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
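# --- Illustrative sketch (not part of the original module) ---
# Standalone computation of 2D sine/cosine position features for a tiny
# feature map, mirroring the forward pass above in the unnormalized case.
# All sizes are made-up example values.
import torch

num_pos_feats, temperature = 4, 10000
height, width = 2, 3
not_mask = torch.ones(1, height, width)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
dim_t = torch.arange(num_pos_feats, dtype=torch.float32)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
print(pos.shape)  # torch.Size([1, 2 * num_pos_feats, height, width])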
|
class MaskFormerSinePositionEmbedding(nn.Module):
'''
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
'''
def __init__(self, num_pos_feats: int=64, temperature: int=10000, normalize: bool=False, scale: Optional[float]=None):
pass
@compile_compatible_method_lru_cache(maxsize=1)
def forward(self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor]=None) -> Tensor:
pass
| 4
| 1
| 15
| 1
| 14
| 0
| 3
| 0.14
| 1
| 6
| 0
| 0
| 2
| 4
| 2
| 12
| 37
| 4
| 29
| 17
| 24
| 4
| 27
| 15
| 24
| 3
| 1
| 1
| 6
|
3,679
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskFormerTransformerModule
|
from torch import Tensor, nn
from .configuration_maskformer import MaskFormerConfig
import torch
from typing import Optional, Union
class MaskFormerTransformerModule(nn.Module):
"""
The MaskFormer's transformer module.
"""
def __init__(self, in_features: int, config: MaskFormerConfig):
super().__init__()
hidden_size = config.decoder_config.hidden_size
should_project = in_features != hidden_size
self.position_embedder = MaskFormerSinePositionEmbedding(num_pos_feats=hidden_size // 2, normalize=True)
self.queries_embedder = nn.Embedding(config.decoder_config.num_queries, hidden_size)
self.input_projection = nn.Conv2d(in_features, hidden_size, kernel_size=1) if should_project else None
self.decoder = DetrDecoder(config=config.decoder_config)
def forward(self, image_features: Tensor, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: Optional[bool]=None) -> DetrDecoderOutput:
if self.input_projection is not None:
image_features = self.input_projection(image_features)
object_queries = self.position_embedder(image_features.shape, image_features.device, image_features.dtype)
batch_size = image_features.shape[0]
queries_embeddings = self.queries_embedder.weight.unsqueeze(0).repeat(batch_size, 1, 1)
inputs_embeds = torch.zeros_like(queries_embeddings, requires_grad=self.training)
if self.training:
inputs_embeds.requires_grad_(True)
batch_size, num_channels, height, width = image_features.shape
image_features = image_features.view(batch_size, num_channels, height * width).permute(0, 2, 1)
object_queries = object_queries.view(batch_size, num_channels, height * width).permute(0, 2, 1)
decoder_output: DetrDecoderOutput = self.decoder(inputs_embeds=inputs_embeds, attention_mask=None, encoder_hidden_states=image_features, encoder_attention_mask=None, object_queries=object_queries, query_position_embeddings=queries_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
return decoder_output
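# --- Illustrative sketch (not part of the original module) ---
# The flattening step used above before the DETR decoder: a (batch, channels,
# height, width) feature map becomes a (batch, height * width, channels)
# sequence. All sizes below are made-up example values.
import torch

batch_size, num_channels, height, width = 2, 256, 8, 8
image_features = torch.rand(batch_size, num_channels, height, width)
sequence = image_features.view(batch_size, num_channels, height * width).permute(0, 2, 1)
print(sequence.shape)  # torch.Size([2, 64, 256])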
|
class MaskFormerTransformerModule(nn.Module):
'''
The MaskFormer's transformer module.
'''
def __init__(self, in_features: int, config: MaskFormerConfig):
pass
def forward(self, image_features: Tensor, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: Optional[bool]=None) -> DetrDecoderOutput:
pass
| 3
| 1
| 20
| 1
| 18
| 1
| 2
| 0.14
| 1
| 8
| 4
| 0
| 2
| 4
| 2
| 12
| 46
| 4
| 37
| 21
| 28
| 5
| 21
| 15
| 18
| 2
| 1
| 1
| 4
|
3,680
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer.py
|
transformers.models.maskformer.modeling_maskformer.MaskformerMLPPredictionHead
|
from torch import Tensor, nn
class MaskformerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
"""
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
"""
super().__init__()
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
self.layers = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
activation = nn.ReLU() if i < num_layers - 1 else nn.Identity()
layer = PredictionBlock(in_dim, out_dim, activation=activation)
self.layers.append(layer)
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
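# --- Illustrative sketch (not part of the original module) ---
# The prediction head above is a plain MLP; functionally it is close to the
# nn.Sequential stack below (dimensions are made-up example values).
import torch
from torch import nn

input_dim, hidden_dim, output_dim = 256, 256, 128
mlp = nn.Sequential(
    nn.Linear(input_dim, hidden_dim), nn.ReLU(),
    nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
    nn.Linear(hidden_dim, output_dim),
)
queries = torch.rand(1, 100, input_dim)  # (batch, num_queries, hidden)
print(mlp(queries).shape)  # torch.Size([1, 100, 128])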
|
class MaskformerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
'''
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
'''
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3
| 1
| 18
| 1
| 8
| 9
| 3
| 1.13
| 1
| 7
| 1
| 0
| 2
| 1
| 2
| 12
| 37
| 3
| 16
| 11
| 13
| 18
| 16
| 11
| 13
| 3
| 1
| 1
| 5
|
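The constructor above chains layer widths by zipping two shifted copies of the dimension list. A quick sketch of that pairing with example sizes (the numbers are illustrative, not from the dataset row):

```python
# How in_dims/out_dims pair up into per-layer (in, out) sizes for the MLP head.
input_dim, hidden_dim, output_dim, num_layers = 256, 256, 4, 3
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
print(list(zip(in_dims, out_dims)))  # [(256, 256), (256, 256), (256, 4)]
```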
3,681
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinAttention
|
import torch
from typing import Optional
from torch import Tensor, nn
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
class MaskFormerSwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
self.self = MaskFormerSwinSelfAttention(config, dim, num_heads, window_size)
self.output = MaskFormerSwinSelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class MaskFormerSwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 11
| 1
| 10
| 1
| 1
| 0.1
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 36
| 4
| 30
| 17
| 20
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
3,682
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinBackbone
|
from .configuration_maskformer_swin import MaskFormerSwinConfig
from ...utils.backbone_utils import BackboneMixin
from torch import Tensor, nn
from ...modeling_outputs import BackboneOutput
from typing import Optional
class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin):
"""
MaskFormerSwin backbone, designed especially for the MaskFormer framework.
    This class reshapes `hidden_states` from `(batch_size, sequence_length, hidden_size)` to `(batch_size,
    num_channels, height, width)`. It also adds additional layernorms after each stage.
Args:
config (`MaskFormerSwinConfig`):
The configuration used by [`MaskFormerSwinModel`].
"""
def __init__(self, config: MaskFormerSwinConfig):
super().__init__(config)
super()._init_backbone(config)
self.model = MaskFormerSwinModel(config)
if 'stem' in self.out_features:
raise ValueError("This backbone does not support 'stem' in the `out_features`.")
self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))]
self.hidden_states_norms = nn.ModuleList([nn.LayerNorm(num_channels) for num_channels in self.num_features[1:]])
self.post_init()
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.model(pixel_values, output_hidden_states=True, output_attentions=output_attentions, return_dict=True)
hidden_states = outputs.hidden_states[1:]
spatial_dimensions: tuple[tuple[int, int]] = outputs.hidden_states_spatial_dimensions
feature_maps = ()
for i, (hidden_state, stage, (height, width)) in enumerate(zip(hidden_states, self.stage_names[1:], spatial_dimensions)):
norm = self.hidden_states_norms[i]
hidden_state_unpolled = hidden_state[-1]
hidden_state_norm = norm(hidden_state_unpolled)
batch_size, _, hidden_size = hidden_state_norm.shape
hidden_state_permuted = hidden_state_norm.permute(0, 2, 1).view((batch_size, hidden_size, height, width)).contiguous()
if stage in self.out_features:
feature_maps += (hidden_state_permuted,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
if output_attentions:
output += (outputs.attentions,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin):
'''
MaskFormerSwin backbone, designed especially for the MaskFormer framework.
    This class reshapes `hidden_states` from `(batch_size, sequence_length, hidden_size)` to `(batch_size,
    num_channels, height, width)`. It also adds additional layernorms after each stage.
Args:
config (`MaskFormerSwinConfig`):
The configuration used by [`MaskFormerSwinModel`].
'''
def __init__(self, config: MaskFormerSwinConfig):
pass
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
pass
| 3
| 1
| 34
| 4
| 27
| 4
| 6
| 0.28
| 2
| 11
| 3
| 0
| 2
| 3
| 2
| 15
| 80
| 11
| 54
| 23
| 45
| 15
| 34
| 17
| 31
| 10
| 2
| 2
| 12
|
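The backbone's forward pass permutes and reshapes each per-stage hidden state from `(batch_size, sequence_length, hidden_size)` into a `(batch_size, num_channels, height, width)` feature map, as the docstring describes. A standalone sketch of just that operation, with illustrative shapes:

```python
import torch

batch_size, height, width, hidden_size = 2, 14, 14, 384
hidden_state = torch.randn(batch_size, height * width, hidden_size)

# (B, L, C) -> (B, C, H, W); splitting the sequence dim back into height/width.
feature_map = hidden_state.permute(0, 2, 1).view(batch_size, hidden_size, height, width)
print(feature_map.shape)  # torch.Size([2, 384, 14, 14])
```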
3,683
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinBaseModelOutput
|
from dataclasses import dataclass
from ...utils import auto_docstring, torch_int
from typing import Optional
import torch
from ...file_utils import ModelOutput
@dataclass
@auto_docstring(custom_intro="\n Class for SwinEncoder's outputs.\n ")
class MaskFormerSwinBaseModelOutput(ModelOutput):
"""
hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
        `batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the `forward`
method.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states_spatial_dimensions: tuple[tuple[int, int]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Class for SwinEncoder's outputs.\n ")
class MaskFormerSwinBaseModelOutput(ModelOutput):
'''
hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
        `batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the `forward`
method.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 4
| 5
| 5
| 4
| 19
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
3,684
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinDropPath
|
import torch
from torch import Tensor, nn
from typing import Optional
class MaskFormerSwinDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class MaskFormerSwinDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
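`MaskFormerSwinDropPath.forward` delegates to a module-level `drop_path` function that is not included in this record. The sketch below is a standard stochastic-depth implementation it is assumed to follow (dropping the residual branch per sample and rescaling by the keep probability); it is illustrative, not a quote of the file.

```python
import torch


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity when disabled or at eval time.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # 0 or 1 per sample
    return input.div(keep_prob) * random_tensor
```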
3,685
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinEmbeddings
|
from torch import Tensor, nn
import torch
from ...utils import auto_docstring, torch_int
class MaskFormerSwinEmbeddings(nn.Module):
"""
Construct the patch and position embeddings.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = MaskFormerSwinPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values, interpolate_pos_encoding):
_, num_channels, height, width = pixel_values.shape
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
if self.position_embeddings is not None:
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return (embeddings, output_dimensions)
|
class MaskFormerSwinEmbeddings(nn.Module):
'''
Construct the patch and position embeddings.
'''
def __init__(self, config):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values, interpolate_pos_encoding):
pass
| 4
| 2
| 23
| 5
| 15
| 3
| 2
| 0.27
| 1
| 4
| 1
| 0
| 3
| 6
| 3
| 13
| 76
| 19
| 45
| 21
| 41
| 12
| 38
| 21
| 34
| 3
| 1
| 2
| 7
|
3,686
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinEncoder
|
import torch
from torch import Tensor, nn
class MaskFormerSwinEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
self.layers = nn.ModuleList([MaskFormerSwinStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), input_resolution=(grid_size[0] // 2 ** i_layer, grid_size[1] // 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=MaskFormerSwinPatchMerging if i_layer < self.num_layers - 1 else None) for i_layer in range(self.num_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_input_dimensions = ()
all_self_attentions = () if output_attentions else None
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_hidden_states, output_dimensions, layer_all_hidden_states = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, output_hidden_states)
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
all_input_dimensions += (input_dimensions,)
if output_hidden_states:
all_hidden_states += (layer_all_hidden_states,)
hidden_states = layer_hidden_states
if output_attentions:
all_self_attentions = all_self_attentions + (layer_all_hidden_states[1],)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return MaskFormerSwinBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, hidden_states_spatial_dimensions=all_input_dimensions, attentions=all_self_attentions)
|
class MaskFormerSwinEncoder(nn.Module):
def __init__(self, config, grid_size):
pass
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
pass
| 3
| 0
| 38
| 5
| 33
| 0
| 6
| 0.01
| 1
| 8
| 3
| 0
| 2
| 4
| 2
| 12
| 78
| 10
| 67
| 22
| 56
| 1
| 29
| 14
| 26
| 10
| 1
| 2
| 12
|
3,687
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinIntermediate
|
from torch import Tensor, nn
import torch
from ...activations import ACT2FN
class MaskFormerSwinIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class MaskFormerSwinIntermediate(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
3,688
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinLayer
|
import torch
from torch import Tensor, nn
class MaskFormerSwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
super().__init__()
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = MaskFormerSwinAttention(config, dim, num_heads, self.window_size)
self.drop_path = MaskFormerSwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = MaskFormerSwinIntermediate(config, dim)
self.output = MaskFormerSwinOutput(config, dim)
def get_attn_mask(self, input_resolution):
if self.shift_size > 0:
height, width = input_resolution
img_mask = torch.zeros((1, height, width, 1))
height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_left = pad_top = 0
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, pad_left, pad_right, pad_top, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return (hidden_states, pad_values)
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
height, width = input_dimensions
batch_size, dim, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask((height_pad, width_pad))
if attn_mask is not None:
attn_mask = attn_mask.to(hidden_states_windows.device)
self_attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
outputs = (layer_output,) + outputs
return outputs
|
class MaskFormerSwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
pass
def get_attn_mask(self, input_resolution):
pass
def maybe_pad(self, hidden_states, height, width):
pass
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
pass
| 5
| 0
| 26
| 4
| 21
| 2
| 3
| 0.08
| 1
| 7
| 4
| 0
| 4
| 9
| 4
| 14
| 108
| 18
| 85
| 42
| 80
| 7
| 70
| 42
| 65
| 5
| 1
| 3
| 12
|
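`MaskFormerSwinLayer.forward` relies on module-level `window_partition` and `window_reverse` helpers that are not part of this record. The sketch below shows the standard Swin-style implementations they are assumed to match: non-overlapping `window_size x window_size` windows, and the inverse stitch back to the full feature map.

```python
import torch


def window_partition(input_feature: torch.Tensor, window_size: int) -> torch.Tensor:
    # (B, H, W, C) -> (num_windows * B, window_size, window_size, C)
    batch_size, height, width, num_channels = input_feature.shape
    input_feature = input_feature.view(
        batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
    )
    return input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)


def window_reverse(windows: torch.Tensor, window_size: int, height: int, width: int) -> torch.Tensor:
    # (num_windows * B, window_size, window_size, C) -> (B, H, W, C)
    num_channels = windows.shape[-1]
    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
    return windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
```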
3,689
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinModel
|
from torch import Tensor, nn
import torch
class MaskFormerSwinModel(MaskFormerSwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = MaskFormerSwinEmbeddings(config)
self.encoder = MaskFormerSwinEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, pixel_values=None, head_mask=None, output_attentions=None, output_hidden_states=None, interpolate_pos_encoding=False, return_dict=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, input_dimensions = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs = self.encoder(embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
hidden_states_spatial_dimensions = (input_dimensions,) + encoder_outputs.hidden_states_spatial_dimensions
return MaskFormerSwinModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, hidden_states_spatial_dimensions=hidden_states_spatial_dimensions, attentions=encoder_outputs.attentions)
|
class MaskFormerSwinModel(MaskFormerSwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
def forward(self, pixel_values=None, head_mask=None, output_attentions=None, output_hidden_states=None, interpolate_pos_encoding=False, return_dict=None):
pass
| 5
| 1
| 20
| 3
| 15
| 2
| 3
| 0.15
| 1
| 6
| 3
| 0
| 4
| 7
| 4
| 5
| 82
| 14
| 59
| 26
| 46
| 9
| 34
| 18
| 29
| 8
| 2
| 1
| 13
|
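A hedged usage sketch for the model above: it builds a randomly initialized backbone from the default `MaskFormerSwinConfig` and runs one dummy image through it. The import path for `MaskFormerSwinModel` is assumed from this record's file path (the class is not re-exported at the top level), and the printed shapes reflect the default configuration (224x224 input, patch size 4, four stages).

```python
import torch
from transformers import MaskFormerSwinConfig
from transformers.models.maskformer.modeling_maskformer_swin import MaskFormerSwinModel

config = MaskFormerSwinConfig()  # defaults: image_size=224, patch_size=4, embed_dim=96
model = MaskFormerSwinModel(config).eval()

pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.last_hidden_state.shape)           # (1, 49, 768) with the default config
print(outputs.hidden_states_spatial_dimensions)  # per-stage (H, W), starting at (56, 56)
```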
3,690
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinModelOutputWithPooling
|
from ...file_utils import ModelOutput
from dataclasses import dataclass
from ...utils import auto_docstring, torch_int
from typing import Optional
import torch
@dataclass
@auto_docstring(custom_intro="\n Class for MaskFormerSwinModel's outputs that also contains the spatial dimensions of the hidden states.\n ")
class MaskFormerSwinModelOutputWithPooling(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state after a mean pooling operation.
hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
`batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the
`forward` method.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states_spatial_dimensions: tuple[tuple[int, int]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Class for MaskFormerSwinModel's outputs that also contains the spatial dimensions of the hidden states.\n ")
class MaskFormerSwinModelOutputWithPooling(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state after a mean pooling operation.
hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
`batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the
`forward` method.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 4
| 6
| 6
| 5
| 21
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
3,691
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinOutput
|
import torch
from torch import Tensor, nn
class MaskFormerSwinOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class MaskFormerSwinOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
3,692
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinPatchEmbeddings
|
import collections.abc
from torch import Tensor, nn
import torch
from typing import Optional
class MaskFormerSwinPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.embed_dim)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
_, num_channels, height, width = pixel_values.shape
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return (embeddings, output_dimensions)
|
class MaskFormerSwinPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def maybe_pad(self, pixel_values, height, width):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
pass
| 4
| 1
| 11
| 1
| 10
| 0
| 2
| 0.2
| 1
| 4
| 0
| 0
| 3
| 6
| 3
| 13
| 41
| 5
| 30
| 17
| 26
| 6
| 30
| 17
| 26
| 3
| 1
| 1
| 7
|
3,693
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinPatchMerging
|
from torch import Tensor, nn
import torch
class MaskFormerSwinPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
|
class MaskFormerSwinPatchMerging(nn.Module):
'''
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
'''
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
pass
def maybe_pad(self, input_feature, height, width):
pass
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
pass
| 4
| 1
| 12
| 1
| 9
| 3
| 1
| 0.67
| 1
| 3
| 0
| 0
| 3
| 4
| 3
| 13
| 52
| 8
| 27
| 16
| 23
| 18
| 27
| 16
| 23
| 2
| 1
| 1
| 4
|
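A shape-only sketch of the 2x2 merge performed above: four strided slices are concatenated on the channel axis (quadrupling it), and the `nn.Linear(4 * dim, 2 * dim)` reduction then brings the width back down to twice the input channels. The sizes below are illustrative.

```python
import torch
from torch import nn

batch_size, height, width, dim = 2, 8, 8, 96
x = torch.randn(batch_size, height, width, dim)

merged = torch.cat(
    [x[:, 0::2, 0::2, :], x[:, 1::2, 0::2, :], x[:, 0::2, 1::2, :], x[:, 1::2, 1::2, :]], dim=-1
)
print(merged.shape)  # torch.Size([2, 4, 4, 384]): half the resolution, 4x the channels

reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
print(reduction(merged.view(batch_size, -1, 4 * dim)).shape)  # torch.Size([2, 16, 192])
```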
3,694
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinPreTrainedModel
|
from torch import Tensor, nn
from ...utils import auto_docstring, torch_int
from ...modeling_utils import PreTrainedModel
from .configuration_maskformer_swin import MaskFormerSwinConfig
@auto_docstring
class MaskFormerSwinPreTrainedModel(PreTrainedModel):
config: MaskFormerSwinConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['MaskFormerSwinStage']
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, MaskFormerSwinEmbeddings):
if module.position_embeddings is not None:
module.position_embeddings.data.zero_()
elif isinstance(module, MaskFormerSwinSelfAttention):
module.relative_position_bias_table.data.zero_()
|
@auto_docstring
class MaskFormerSwinPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 11
| 0
| 8
| 3
| 4
| 0.5
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 23
| 2
| 14
| 7
| 12
| 7
| 13
| 7
| 11
| 4
| 1
| 2
| 4
|
3,695
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinSelfAttention
|
from torch import Tensor, nn
from typing import Optional
import torch
import collections.abc
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
import math
class MaskFormerSwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})')
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads))
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij'))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer('relative_position_index', relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
hidden_shape = (batch_size, dim, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class MaskFormerSwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 32
| 6
| 24
| 2
| 3
| 0.1
| 1
| 6
| 0
| 0
| 3
| 9
| 3
| 13
| 98
| 19
| 72
| 38
| 62
| 7
| 56
| 32
| 52
| 4
| 1
| 1
| 8
|
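For reference, the relative position bias table created above has one row per possible (dy, dx) offset inside a window, i.e. `(2*Wh - 1) * (2*Ww - 1)` rows. A tiny sketch with an assumed 7x7 window and 3 attention heads:

```python
import torch

window_size, num_heads = (7, 7), 3
num_relative_positions = (2 * window_size[0] - 1) * (2 * window_size[1] - 1)
table = torch.zeros(num_relative_positions, num_heads)
print(table.shape)  # torch.Size([169, 3])
```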
3,696
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinSelfOutput
|
import torch
from torch import Tensor, nn
class MaskFormerSwinSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class MaskFormerSwinSelfOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 1
| 4
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 11
| 2
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
3,697
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/maskformer/modeling_maskformer_swin.py
|
transformers.models.maskformer.modeling_maskformer_swin.MaskFormerSwinStage
|
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
class MaskFormerSwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList([MaskFormerSwinLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if i % 2 == 0 else config.window_size // 2) for i in range(depth)])
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False):
all_hidden_states = () if output_hidden_states else None
height, width = input_dimensions
for i, block_module in enumerate(self.blocks):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
block_hidden_states = block_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = block_hidden_states[0]
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.downsample is not None:
height_downsampled, width_downsampled = ((height + 1) // 2, (width + 1) // 2)
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states, input_dimensions)
else:
output_dimensions = (height, width, height, width)
return (hidden_states, output_dimensions, all_hidden_states)
|
class MaskFormerSwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
pass
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False):
pass
| 3
| 0
| 26
| 5
| 21
| 1
| 5
| 0.05
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 55
| 10
| 43
| 17
| 38
| 2
| 27
| 15
| 24
| 7
| 1
| 2
| 10
|
3,698
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mbart/configuration_mbart.py
|
transformers.models.mbart.configuration_mbart.MBartConfig
|
from ...configuration_utils import PretrainedConfig
class MBartConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MBART
[facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 2):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import MBartConfig, MBartModel
>>> # Initializing a MBART facebook/mbart-large-cc25 style configuration
>>> configuration = MBartConfig()
>>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration
>>> model = MBartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mbart'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, forced_eos_token_id=forced_eos_token_id, **kwargs)
|
class MBartConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MBART
[facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 2):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import MBartConfig, MBartModel
>>> # Initializing a MBART facebook/mbart-large-cc25 style configuration
>>> configuration = MBartConfig()
>>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration
>>> model = MBartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs):
pass
| 2
| 1
| 56
| 0
| 56
| 1
| 1
| 1.08
| 1
| 1
| 0
| 0
| 1
| 20
| 1
| 1
| 134
| 10
| 60
| 52
| 31
| 65
| 26
| 25
| 24
| 1
| 1
| 0
| 1
|
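A small usage sketch for the configuration above, showing how the `attribute_map` defined on the class lets generic attribute names resolve to the MBart-specific ones. This relies only on the public `MBartConfig` class; the chosen layer counts are arbitrary.

```python
from transformers import MBartConfig

config = MBartConfig(encoder_layers=6, decoder_layers=6)

# Generic names are aliased onto MBart-specific attributes via attribute_map.
print(config.hidden_size == config.d_model)                           # True
print(config.num_attention_heads == config.encoder_attention_heads)   # True
print(config.num_hidden_layers)                                       # 6 (mirrors encoder_layers)
```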
3,699
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mbart/configuration_mbart.py
|
transformers.models.mbart.configuration_mbart.MBartOnnxConfig
|
from ...onnx.utils import compute_effective_axis_dimension
from ... import PreTrainedTokenizer
from typing import Any
from collections import OrderedDict
from ...utils import is_torch_available, logging
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from collections.abc import Mapping
class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ['default', 'seq2seq-lm']:
common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
if self.use_past:
common_inputs['decoder_input_ids'] = {0: 'batch'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction='inputs')
elif self.task == 'causal-lm':
common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
if self.use_past:
num_encoder_layers, _ = self.num_layers
for i in range(num_encoder_layers):
common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
return common_inputs
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ['default', 'seq2seq-lm']:
common_outputs = super().outputs
else:
common_outputs = super(OnnxConfigWithPast, self).outputs
if self.use_past:
num_encoder_layers, _ = self.num_layers
for i in range(num_encoder_layers):
common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair)
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair)
decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch, encoder_seq_length = common_inputs['input_ids'].shape
decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads)
decoder_past_length = decoder_seq_length + 3
decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads)
common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
common_inputs['past_key_values'] = []
num_encoder_layers, num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
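            # Layers shared by both stacks get four cache tensors (decoder self-attention K/V and
            # cross-attention K/V); any extra layers on the deeper side only get a two-tensor entry.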
for _ in range(min_num_layers):
common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(min_num_layers, max_num_layers):
common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch, seqlen = common_inputs['input_ids'].shape
past_key_values_length = seqlen + 2
num_encoder_layers, _ = self.num_layers
num_encoder_attention_heads, _ = self.num_attention_heads
past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads)
mask_dtype = common_inputs['attention_mask'].dtype
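            # Extend the attention mask so it also covers the zero-filled past positions.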
common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)]
return common_inputs
def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
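        # Axes left dynamic (-1) are replaced by the small fixed defaults from OnnxConfig so the export
        # does not specialize on a degenerate dimension.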
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
common_inputs = dict(tokenizer(dummy_input, return_tensors='pt'))
return common_inputs
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
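        # Dispatch to the task-specific dummy-input generator.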
if self.task in ['default', 'seq2seq-lm']:
common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
elif self.task == 'causal-lm':
common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
else:
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
return common_inputs
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
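        # Seq2seq caches hold four tensors per layer (self-attention and cross-attention), while
        # decoder-only caches hold two, so the flattening of past/present entries differs by task.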
if self.task in ['default', 'seq2seq-lm']:
flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
else:
flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
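
# Usage sketch (not part of the original module). The config and checkpoint below are
# illustrative placeholders, shown only to indicate how this class is typically driven:
#
#     from transformers import MBartConfig, MBartTokenizer
#
#     model_config = MBartConfig()
#     tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
#     onnx_config = MBartOnnxConfig(model_config, task='seq2seq-lm', use_past=True)
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)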
|
class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
pass
total_program_units: 10
total_doc_str: 0
AvgCountLine: 30
AvgCountLineBlank: 2
AvgCountLineCode: 27
AvgCountLineComment: 1
AvgCyclomatic: 4
CommentToCodeRatio: 0.05
CountClassBase: 1
CountClassCoupled: 9
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 7
CountDeclInstanceVariable: 1
CountDeclMethod: 7
CountDeclMethodAll: 7
CountLine: 221
CountLineBlank: 20
CountLineCode: 191
CountLineCodeDecl: 73
CountLineCodeExe: 151
CountLineComment: 10
CountStmt: 89
CountStmtDecl: 42
CountStmtExe: 79
MaxCyclomatic: 8
MaxInheritanceTree: 1
MaxNesting: 3
SumCyclomatic: 28