Schema (column name, dtype, and observed min/max; for `string` columns the min/max are string lengths):

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
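A minimal sketch of how a row could be loaded and filtered with the `datasets` library, assuming this dataset is hosted on the Hub; the repository id `org/class-metrics-dataset` is a hypothetical placeholder, since the dataset's actual id does not appear in this section:

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical Hub id -- substitute the dataset's real repository id.
ds = load_dataset("org/class-metrics-dataset", split="train")

# The metric columns above can drive simple filters,
# e.g. well-commented, low-complexity classes.
subset = ds.filter(lambda r: r["CommentToCodeRatio"] >= 0.5 and r["MaxCyclomatic"] <= 10)

row = subset[0]
print(row["class_name"])            # fully qualified class name
print(row["class_skeleton"][:200])  # stub-only view of the class
```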
**Row 3,400**

- `id`: 3,400
- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/tokenization_llama.py
- `class_name`: transformers.models.llama.tokenization_llama.LlamaTokenizer

`human_written_code`:
````python
from ...convert_slow_tokenizer import import_protobuf
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from typing import TYPE_CHECKING, Any, Optional
import os
import sentencepiece as spm
from ...utils.import_utils import requires
from shutil import copyfile


@requires(backends=('sentencepiece',))
class LlamaTokenizer(PreTrainedTokenizer):
    """
    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
    no padding token in the original model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
            attention mechanisms or loss computation.
        sp_model_kwargs (`dict[str, Any]`, `Optional`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add an `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Llama should be used.
        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens.
        legacy (`bool`, *optional*):
            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
            and #25224 which includes fixes to properly handle tokens that appear after special tokens. Make sure to
            also set `from_slow` to `True`. A simple example:

            - `legacy=True`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 869 is '▁.'
            [1, 15043, 29871, 1, 869]
            ```
            - `legacy=False`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 29889 is '.'
            [1, 15043, 29871, 1, 29889]
            ```
            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token=None,
                 sp_model_kwargs: Optional[dict[str, Any]]=None, add_bos_token=True, add_eos_token=False,
                 clean_up_tokenization_spaces=False, use_default_system_prompt=False,
                 spaces_between_special_tokens=False, legacy=None, add_prefix_space=True, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
        if legacy is None:
            logger.warning_once(
                f'You are using the default legacy behaviour of the {self.__class__}. This is expected, and simply'
                ' means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to'
                ' use the new behaviour, set `legacy=False`. This should only be set if you understand what it means,'
                ' and thoroughly read the reason why this was added as explained in'
                ' https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF'
                ' file you can ignore this message'
            )
            legacy = True
        self.legacy = legacy
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.use_default_system_prompt = use_default_system_prompt
        self.sp_model = self.get_spm_processor(kwargs.pop('from_slow', False))
        self.add_prefix_space = add_prefix_space
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
                         add_bos_token=add_bos_token, add_eos_token=add_eos_token,
                         sp_model_kwargs=self.sp_model_kwargs,
                         clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                         use_default_system_prompt=use_default_system_prompt,
                         spaces_between_special_tokens=spaces_between_special_tokens, legacy=legacy,
                         add_prefix_space=add_prefix_space, **kwargs)

    @property
    def unk_token_length(self):
        return len(self.sp_model.encode(str(self.unk_token)))

    def get_spm_processor(self, from_slow=False):
        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        if self.legacy or from_slow:
            tokenizer.Load(self.vocab_file)
            return tokenizer
        with open(self.vocab_file, 'rb') as f:
            sp_model = f.read()
            model_pb2 = import_protobuf(f'The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)')
            model = model_pb2.ModelProto.FromString(sp_model)
            normalizer_spec = model_pb2.NormalizerSpec()
            normalizer_spec.add_dummy_prefix = False
            model.normalizer_spec.MergeFrom(normalizer_spec)
            sp_model = model.SerializeToString()
            tokenizer.LoadFromSerializedProto(sp_model)
        return tokenizer

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def tokenize(self, text: 'TextInput', **kwargs) -> list[str]:
        """
        Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
        first token is special.
        """
        if self.legacy or len(text) == 0:
            return super().tokenize(text, **kwargs)
        text = text.replace(SPIECE_UNDERLINE, ' ')
        if self.add_prefix_space:
            text = SPIECE_UNDERLINE + text
        tokens = super().tokenize(text, **kwargs)
        if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and (tokens[1] in self.all_special_tokens):
            tokens = tokens[1:]
        return tokens

    def _tokenize(self, text, **kwargs):
        """
        Returns a tokenized string.

        We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
        SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
        `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
        """
        if self.legacy or not text.startswith((SPIECE_UNDERLINE, ' ')):
            return self.sp_model.encode(text, out_type=str)
        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
        return tokens[self.unk_token_length:] if len(tokens) >= self.unk_token_length else tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
            tokens[0] = tokens[0][1:]
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for i, token in enumerate(tokens):
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0 and self.legacy:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                if prev_is_special and i == 1 and self.add_prefix_space and (not token.startswith(SPIECE_UNDERLINE)):
                    out_string += ' '
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = bos_token_id + token_ids_0 + eos_token_id
        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id
        return output

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None,
                                already_has_special_tokens: bool=False) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1,
                                                   already_has_special_tokens=True)
        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []
        if token_ids_1 is None:
            return bos_token_id + [0] * len(token_ids_0) + eos_token_id
        return (bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id
                + [0] * len(token_ids_1) + eos_token_id)

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int],
                                             token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of ids.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
        return output
````
`class_skeleton`:

````python
@requires(backends=('sentencepiece',))
class LlamaTokenizer(PreTrainedTokenizer):
    '''
    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
    no padding token in the original model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
            attention mechanisms or loss computation.
        sp_model_kwargs (`dict[str, Any]`, `Optional`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add an `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Llama should be used.
        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens.
        legacy (`bool`, *optional*):
            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
            and #25224 which includes fixes to properly handle tokens that appear after special tokens. Make sure to
            also set `from_slow` to `True`. A simple example:

            - `legacy=True`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 869 is '▁.'
            [1, 15043, 29871, 1, 869]
            ```
            - `legacy=False`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 29889 is '.'
            [1, 15043, 29871, 1, 29889]
            ```
            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
    '''

    def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token=None,
                 sp_model_kwargs: Optional[dict[str, Any]]=None, add_bos_token=True, add_eos_token=False,
                 clean_up_tokenization_spaces=False, use_default_system_prompt=False,
                 spaces_between_special_tokens=False, legacy=None, add_prefix_space=True, **kwargs):
        pass

    @property
    def unk_token_length(self):
        pass

    def get_spm_processor(self, from_slow=False):
        pass

    def __getstate__(self):
        pass

    def __setstate__(self, d):
        pass

    @property
    def vocab_size(self):
        '''Returns vocab size'''
        pass

    def get_vocab(self):
        '''Returns vocab as a dict'''
        pass

    def tokenize(self, text: 'TextInput', **kwargs) -> list[str]:
        '''
        Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
        first token is special.
        '''
        pass

    def _tokenize(self, text, **kwargs):
        '''
        Returns a tokenized string.

        We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
        SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
        `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
        '''
        pass

    def _convert_token_to_id(self, token):
        '''Converts a token (str) in an id using the vocab.'''
        pass

    def _convert_id_to_token(self, index):
        '''Converts an index (integer) in a token (str) using the vocab.'''
        pass

    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) in a single string.'''
        pass

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> tuple[str]:
        '''
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        '''
        pass

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        pass

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None,
                                already_has_special_tokens: bool=False) -> list[int]:
        '''
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        '''
        pass

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int],
                                             token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`list[int]`):
                List of ids.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        '''
        pass
````
Metrics:

| Metric | Value |
| --- | --- |
| total_program_units | 20 |
| total_doc_str | 11 |
| AvgCountLine | 16 |
| AvgCountLineBlank | 2 |
| AvgCountLineCode | 11 |
| AvgCountLineComment | 4 |
| AvgCyclomatic | 3 |
| CommentToCodeRatio | 0.69 |
| CountClassBase | 1 |
| CountClassCoupled | 7 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 1 |
| CountDeclInstanceMethod | 16 |
| CountDeclInstanceVariable | 8 |
| CountDeclMethod | 16 |
| CountDeclMethodAll | 105 |
| CountLine | 357 |
| CountLineBlank | 53 |
| CountLineCode | 180 |
| CountLineCodeDecl | 75 |
| CountLineCodeExe | 141 |
| CountLineComment | 125 |
| CountStmt | 124 |
| CountStmtDecl | 51 |
| CountStmtExe | 107 |
| MaxCyclomatic | 7 |
| MaxInheritanceTree | 3 |
| MaxNesting | 3 |
| SumCyclomatic | 47 |
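The `class_skeleton` field reproduces the class and method signatures and docstrings of `human_written_code` with every body replaced by `pass`. How the dataset actually derives its skeletons is not stated here; the following is a sketch of one way to produce a comparable skeleton with Python's standard `ast` module (the helper name `skeletonize` is mine):

```python
import ast

def skeletonize(source: str) -> str:
    """Replace function bodies with `pass`, keeping signatures and docstrings."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            kept = []
            first = node.body[0] if node.body else None
            # Preserve a leading docstring, as the skeletons in this dataset do.
            if (isinstance(first, ast.Expr) and isinstance(first.value, ast.Constant)
                    and isinstance(first.value.value, str)):
                kept.append(first)
            kept.append(ast.Pass())
            node.body = kept
    return ast.unparse(tree)  # ast.unparse requires Python 3.9+

print(skeletonize("class A:\n    def f(self, x):\n        'Add one.'\n        return x + 1\n"))
```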
**Row 3,401**

- `id`: 3,401
- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/llama/tokenization_llama_fast.py
- `class_name`: transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast

`human_written_code`:
````python
from tokenizers import processors
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from typing import Optional
from shutil import copyfile
import os


class LlamaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.

    This uses notably ByteFallback and no normalization.

    ```python
    >>> from transformers import LlamaTokenizerFast

    >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
    >>> tokenizer.encode("Hello this is a test")
    [1, 15043, 445, 338, 263, 1243]
    ```

    If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
    call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
    values of the first token and final token of an encoded sequence will not be correct). For more details, checkout
    [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        tokenizer_file (`str`, *optional*):
            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add an `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Llama should be used
        legacy (`bool`, *optional*):
            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
            and #25224 which includes fixes to properly handle tokens that appear after special tokens. Make sure to
            also set `from_slow` to `True`. A simple example:

            - `legacy=True`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 869 is '▁.'
            [1, 15043, 29871, 1, 869]
            ```
            - `legacy=False`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 29889 is '.'
            [1, 15043, 29871, 1, 29889]
            ```
            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
        add_prefix_space (`bool`, *optional*):
            Whether or not the tokenizer should automatically add a prefix space
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = LlamaTokenizer
    padding_side = 'left'
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<unk>',
                 bos_token='<s>', eos_token='</s>', add_bos_token=True, add_eos_token=False,
                 use_default_system_prompt=False, legacy=None, add_prefix_space=None, **kwargs):
        if legacy is None:
            logger.warning_once(
                f'You are using the default legacy behaviour of the {self.__class__}. This is expected, and simply'
                ' means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to'
                ' use the new behaviour, set `legacy=False`. This should only be set if you understand what it means,'
                ' and thoroughly read the reason why this was added as explained in'
                ' https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF'
                ' file you can ignore this message.'
            )
            legacy = True
        self.legacy = legacy
        if add_prefix_space is not None:
            kwargs['from_slow'] = True
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file,
                         clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token,
                         add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt,
                         add_prefix_space=add_prefix_space, legacy=legacy, **kwargs)
        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()
        self.use_default_system_prompt = use_default_system_prompt
        self.vocab_file = vocab_file

    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        """
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError('add_bos_token = True but bos_token = None')
        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError('add_eos_token = True but eos_token = None')
        single = f"{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}"
        pair = f"{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}"
        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair,
                                                                       special_tokens=special_tokens)

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for'
                             ' a slow tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = bos_token_id + token_ids_0 + eos_token_id
        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id
        return output
````
`class_skeleton`:

````python
class LlamaTokenizerFast(PreTrainedTokenizerFast):
    '''
    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.

    This uses notably ByteFallback and no normalization.

    ```python
    >>> from transformers import LlamaTokenizerFast

    >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
    >>> tokenizer.encode("Hello this is a test")
    [1, 15043, 445, 338, 263, 1243]
    ```

    If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
    call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
    values of the first token and final token of an encoded sequence will not be correct). For more details, checkout
    [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        tokenizer_file (`str`, *optional*):
            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add an `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Llama should be used
        legacy (`bool`, *optional*):
            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
            and #25224 which includes fixes to properly handle tokens that appear after special tokens. Make sure to
            also set `from_slow` to `True`. A simple example:

            - `legacy=True`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 869 is '▁.'
            [1, 15043, 29871, 1, 869]
            ```
            - `legacy=False`:

            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 29889 is '.'
            [1, 15043, 29871, 1, 29889]
            ```
            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
        add_prefix_space (`bool`, *optional*):
            Whether or not the tokenizer should automatically add a prefix space
    '''

    def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<unk>',
                 bos_token='<s>', eos_token='</s>', add_bos_token=True, add_eos_token=False,
                 use_default_system_prompt=False, legacy=None, add_prefix_space=None, **kwargs):
        pass

    def update_post_processor(self):
        '''
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        '''
        pass

    @property
    def add_eos_token(self):
        pass

    @property
    def add_bos_token(self):
        pass

    @add_eos_token.setter
    def add_eos_token(self):
        pass

    @add_bos_token.setter
    def add_bos_token(self):
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        pass
````
Metrics:

| Metric | Value |
| --- | --- |
| total_program_units | 13 |
| total_doc_str | 2 |
| AvgCountLine | 13 |
| AvgCountLineBlank | 1 |
| AvgCountLineCode | 11 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 3 |
| CommentToCodeRatio | 0.6 |
| CountClassBase | 1 |
| CountClassCoupled | 4 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 9 |
| CountDeclInstanceVariable | 5 |
| CountDeclMethod | 9 |
| CountDeclMethodAll | 97 |
| CountLine | 207 |
| CountLineBlank | 31 |
| CountLineCode | 110 |
| CountLineCodeDecl | 49 |
| CountLineCodeExe | 81 |
| CountLineComment | 66 |
| CountStmt | 64 |
| CountStmtDecl | 30 |
| CountStmtExe | 54 |
| MaxCyclomatic | 9 |
| MaxInheritanceTree | 3 |
| MaxNesting | 1 |
| SumCyclomatic | 27 |
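To make the templating in `update_post_processor` above concrete, here is a standalone sketch of what its `single`/`pair` f-strings expand to in the default case (`add_bos_token=True`, `add_eos_token=False`), using the real `tokenizers` API; the id `1` for `<s>` matches the encodings shown in the docstrings:

```python
from tokenizers import processors

bos, bos_id = "<s>", 1
single = f"{bos}:0 $A:0"         # bos prepended to a single sequence, no eos
pair = f"{single} {bos}:1 $B:1"  # bos repeated before the second sequence
post_processor = processors.TemplateProcessing(
    single=single,
    pair=pair,
    special_tokens=[(bos, bos_id)],  # every special token in the template needs its id
)
```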
**Row 3,402**

- `id`: 3,402
- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/configuration_llava.py
- `class_name`: transformers.models.llava.configuration_llava.LlavaConfig

`human_written_code`:
````python
from ..auto import CONFIG_MAPPING, AutoConfig
from ...configuration_utils import PretrainedConfig


class LlavaConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`LlavaForConditionalGeneration`]. It is used to
    instantiate an Llava model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Llava-9B.

    e.g. [llava-hf/llava-9b](https://huggingface.co/llava-hf/llava-9b)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
            The config object or dictionary of the text backbone.
        image_token_index (`int`, *optional*, defaults to 32000):
            The image token index to encode the image prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function used by the multimodal projector.
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone. Can be one of
            `"default"` or `"full"`.
        vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
            The index of the layer to select the vision feature. If multiple indices are provided, the vision feature
            of the corresponding indices will be concatenated to form the vision features.
        image_seq_length (`int`, *optional*, defaults to 576):
            Sequence length of one image embedding.
        multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the multimodal projector.

    Example:

    ```python
    >>> from transformers import LlavaForConditionalGeneration, LlavaConfig, CLIPVisionConfig, LlamaConfig

    >>> # Initializing a CLIP-vision config
    >>> vision_config = CLIPVisionConfig()

    >>> # Initializing a Llama config
    >>> text_config = LlamaConfig()

    >>> # Initializing a Llava llava-1.5-7b style configuration
    >>> configuration = LlavaConfig(vision_config, text_config)

    >>> # Initializing a model from the llava-1.5-7b style configuration
    >>> model = LlavaForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'llava'
    attribute_map = {'image_token_id': 'image_token_index'}
    sub_configs = {'text_config': AutoConfig, 'vision_config': AutoConfig}

    def __init__(self, vision_config=None, text_config=None, image_token_index=32000, projector_hidden_act='gelu',
                 vision_feature_select_strategy='default', vision_feature_layer=-2, image_seq_length=576,
                 multimodal_projector_bias=True, **kwargs):
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.image_seq_length = image_seq_length
        if vision_feature_select_strategy not in ['default', 'full']:
            raise ValueError(f"vision_feature_select_strategy should be one of 'default', 'full'."
                             f"Got: {vision_feature_select_strategy}")
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        if isinstance(vision_config, dict):
            vision_config['model_type'] = vision_config.get('model_type', 'clip_vision_model')
            vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config)
        elif vision_config is None:
            vision_config = CONFIG_MAPPING['clip_vision_model'](intermediate_size=4096, hidden_size=1024,
                                                                patch_size=14, image_size=336, num_hidden_layers=24,
                                                                num_attention_heads=16, vocab_size=32000,
                                                                projection_dim=768)
        self.vision_config = vision_config
        if isinstance(text_config, dict):
            text_config['model_type'] = text_config.get('model_type', 'llama')
            text_config = CONFIG_MAPPING[text_config['model_type']](**text_config)
        elif text_config is None:
            text_config = CONFIG_MAPPING['llama']()
        self.text_config = text_config
        self.multimodal_projector_bias = multimodal_projector_bias
        super().__init__(**kwargs)
````
`class_skeleton`:

````python
class LlavaConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`LlavaForConditionalGeneration`]. It is used to
    instantiate an Llava model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Llava-9B.

    e.g. [llava-hf/llava-9b](https://huggingface.co/llava-hf/llava-9b)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
            The config object or dictionary of the text backbone.
        image_token_index (`int`, *optional*, defaults to 32000):
            The image token index to encode the image prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function used by the multimodal projector.
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone. Can be one of
            `"default"` or `"full"`.
        vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
            The index of the layer to select the vision feature. If multiple indices are provided, the vision feature
            of the corresponding indices will be concatenated to form the vision features.
        image_seq_length (`int`, *optional*, defaults to 576):
            Sequence length of one image embedding.
        multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the multimodal projector.

    Example:

    ```python
    >>> from transformers import LlavaForConditionalGeneration, LlavaConfig, CLIPVisionConfig, LlamaConfig

    >>> # Initializing a CLIP-vision config
    >>> vision_config = CLIPVisionConfig()

    >>> # Initializing a Llama config
    >>> text_config = LlamaConfig()

    >>> # Initializing a Llava llava-1.5-7b style configuration
    >>> configuration = LlavaConfig(vision_config, text_config)

    >>> # Initializing a model from the llava-1.5-7b style configuration
    >>> model = LlavaForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vision_config=None, text_config=None, image_token_index=32000, projector_hidden_act='gelu',
                 vision_feature_select_strategy='default', vision_feature_layer=-2, image_seq_length=576,
                 multimodal_projector_bias=True, **kwargs):
        pass
````
Metrics:

| Metric | Value |
| --- | --- |
| total_program_units | 2 |
| total_doc_str | 1 |
| AvgCountLine | 56 |
| AvgCountLineBlank | 7 |
| AvgCountLineCode | 49 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 8 |
| CommentToCodeRatio | 0.83 |
| CountClassBase | 1 |
| CountClassCoupled | 3 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 1 |
| CountDeclInstanceVariable | 9 |
| CountDeclMethod | 1 |
| CountDeclMethodAll | 1 |
| CountLine | 114 |
| CountLineBlank | 19 |
| CountLineCode | 52 |
| CountLineCodeDecl | 25 |
| CountLineCodeExe | 38 |
| CountLineComment | 43 |
| CountStmt | 24 |
| CountStmtDecl | 13 |
| CountStmtExe | 22 |
| MaxCyclomatic | 8 |
| MaxInheritanceTree | 1 |
| MaxNesting | 1 |
| SumCyclomatic | 8 |
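Across the three complete sample rows, `CommentToCodeRatio` is consistent with `CountLineComment / CountLineCode` rounded to two decimals; this is an observation from these rows, not a definition documented in this section:

```python
rows = [
    {"id": 3400, "CountLineComment": 125, "CountLineCode": 180, "CommentToCodeRatio": 0.69},
    {"id": 3401, "CountLineComment": 66, "CountLineCode": 110, "CommentToCodeRatio": 0.60},
    {"id": 3402, "CountLineComment": 43, "CountLineCode": 52, "CommentToCodeRatio": 0.83},
]
for r in rows:
    assert round(r["CountLineComment"] / r["CountLineCode"], 2) == r["CommentToCodeRatio"]
```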
**Row 3,403**

- `id`: 3,403
- `repository_name`: huggingface/pytorch-pretrained-BERT
- `file_path`: huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/image_processing_llava.py
- `class_name`: transformers.models.llava.image_processing_llava.LlavaImageProcessor

`human_written_code`:
````python
from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...utils import TensorType, is_vision_available, logging
from ...image_utils import (OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling,
                            get_image_size, infer_channel_dimension_format, is_scaled_image,
                            make_flat_list_of_images, to_numpy_array, valid_images, validate_kwargs,
                            validate_preprocess_arguments)
import numpy as np
from typing import Optional, Union


class LlavaImageProcessor(BaseImageProcessor):
    """
    Constructs a LLaVa image processor.

    Args:
        do_pad (`bool`, *optional*, defaults to `False`):
            Whether to pad the image to a square based on the longest edge. The padding value is determined by the
            `image_mean` parameter. Can be overridden by `do_pad` in the `preprocess` method.
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        crop_size (`dict[str, int]` *optional*, defaults to 224):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
            Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_pad: bool=False, do_resize: bool=True, size: Optional[dict[str, int]]=None,
                 resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True,
                 crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True,
                 rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True,
                 image_mean: Optional[Union[float, list[float]]]=None,
                 image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_pad = do_pad
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
        self._valid_processor_keys = ['images', 'do_pad', 'do_resize', 'size', 'resample', 'do_center_crop',
                                      'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean',
                                      'image_std', 'do_convert_rgb', 'return_tensors', 'data_format',
                                      'input_data_format']

    def pad_to_square(self, image: np.ndarray, background_color: Union[int, tuple[int, int, int]]=0,
                      data_format: Optional[Union[str, ChannelDimension]]=None,
                      input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
        """
        Pads an image to a square based on the longest edge.

        Args:
            image (`np.ndarray`):
                The image to pad.
            background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
                The color to use for the padding. Can be an integer for single channel or a tuple of integers
                representing for multi-channel images. If passed as integer in multi-channel mode, it will default to
                `0` in subsequent channels.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                If unset, will use same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                If unset, will use the inferred format of the input image.

        Returns:
            `np.ndarray`: The padded image.
        """
        height, width = get_image_size(image, input_data_format)
        num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1]
        if height == width:
            image = (to_channel_dimension_format(image, data_format, input_data_format)
                     if data_format is not None else image)
            return image
        max_dim = max(height, width)
        if isinstance(background_color, int):
            background_color = [background_color]
        elif len(background_color) != num_channels:
            raise ValueError(f'background_color must have no more than {num_channels} elements to match the number'
                             ' of channels')
        if input_data_format == ChannelDimension.FIRST:
            result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype)
            for i, color in enumerate(background_color):
                result[i, :, :] = color
            if width > height:
                start = (max_dim - height) // 2
                result[:, start:start + height, :] = image
            else:
                start = (max_dim - width) // 2
                result[:, :, start:start + width] = image
        else:
            result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype)
            for i, color in enumerate(background_color):
                result[:, :, i] = color
            if width > height:
                start = (max_dim - height) // 2
                result[start:start + height, :, :] = image
            else:
                start = (max_dim - width) // 2
                result[:, start:start + width, :] = image
        image = (to_channel_dimension_format(result, data_format, input_data_format)
                 if data_format is not None else result)
        return image

    def resize(self, image: np.ndarray, size: dict[str, int],
               resample: PILImageResampling=PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]]=None,
               input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resiizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        default_to_square = True
        if 'shortest_edge' in size:
            size = size['shortest_edge']
            default_to_square = False
        elif 'height' in size and 'width' in size:
            size = (size['height'], size['width'])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
        output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square,
                                                   input_data_format=input_data_format)
        return resize(image, size=output_size, resample=resample, data_format=data_format,
                      input_data_format=input_data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_pad: Optional[bool]=None, do_resize: Optional[bool]=None,
                   size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None,
                   do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None,
                   do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None,
                   do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None,
                   image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None,
                   return_tensors: Optional[Union[str, TensorType]]=None,
                   data_format: Optional[ChannelDimension]=ChannelDimension.FIRST,
                   input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image to a square based on the longest edge. The padding value is determined by the
                `image_mean` parameter.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
        images = self.fetch_images(images)
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize,
                                      image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop,
                                      crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input'
                                ' images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling'
                                ' them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        processed_images = []
        for image in images:
            if do_pad:
                image = self.pad_to_square(image=image,
                                           background_color=tuple((int(x * 255) for x in self.image_mean)),
                                           input_data_format=input_data_format)
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
            if do_normalize:
                image = self.normalize(image=image, mean=image_mean, std=image_std,
                                       input_data_format=input_data_format)
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            processed_images.append(image)
        return BatchFeature(data={'pixel_values': processed_images}, tensor_type=return_tensors)
````
class LlavaImageProcessor(BaseImageProcessor): ''' Constructs a LLaVa image processor. Args: do_pad (`bool`, *optional*, defaults to `False`): Whether to pad the image to a square based on the longest edge. The padding value is determined by the `image_mean` parameter. Can be overridden by `do_pad` in the `preprocess` method. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. ''' def __init__(self, do_pad: bool=False, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: pass def pad_to_square(self, image: np.ndarray, background_color: Union[int, tuple[int, int, int]]=0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array: ''' Pads an image to a square based on the longest edge. Args: image (`np.ndarray`): The image to pad. 
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for a single channel or a tuple of integers for multi-channel images. If passed as an integer in multi-channel mode, it will default to `0` in subsequent channels. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the same format as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. ''' pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def preprocess(self, images: ImageInput, do_pad: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image: ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to a square based on the longest edge. The padding value is determined by the `image_mean` parameter. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. 
resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
5
4
83
6
53
24
11
0.63
1
11
3
0
4
13
4
24
381
31
215
76
162
135
101
28
96
23
3
2
43
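The `preprocess` implementation above applies its steps in a fixed order: pad to square, resize, center-crop, rescale, normalize, then convert the channel layout. Below is a minimal NumPy sketch of the pad-to-square step, assuming channels-last `uint8` input; `pad_to_square_hwc` is a hypothetical helper written for illustration, not part of transformers.

```python
import numpy as np

def pad_to_square_hwc(image: np.ndarray, background_color=(0, 0, 0)) -> np.ndarray:
    """Pad an (H, W, C) image to a square canvas, centering the original content."""
    height, width, num_channels = image.shape
    if height == width:
        return image
    side = max(height, width)
    canvas = np.empty((side, side, num_channels), dtype=image.dtype)
    canvas[...] = np.asarray(background_color, dtype=image.dtype)  # per-channel fill
    top = (side - height) // 2
    left = (side - width) // 2
    canvas[top:top + height, left:left + width] = image
    return canvas

# preprocess() passes the rescaled image mean as the padding color:
color = tuple(int(x * 255) for x in (0.48145466, 0.4578275, 0.40821073))  # OPENAI_CLIP_MEAN
padded = pad_to_square_hwc(np.zeros((300, 480, 3), dtype=np.uint8), color)
assert padded.shape == (480, 480, 3)
```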
3,404
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/image_processing_llava_fast.py
transformers.models.llava.image_processing_llava_fast.LlavaImageProcessorFast
from ...image_processing_utils import BatchFeature from ...processing_utils import Unpack import torch from typing import Optional, Union from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size from ...utils import TensorType, auto_docstring, is_torchvision_v2_available from ...image_processing_utils_fast import BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images @auto_docstring class LlavaImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {'shortest_edge': 224} default_to_square = False crop_size = {'height': 224, 'width': 224} do_pad = False do_resize = True do_center_crop = True do_rescale = True do_normalize = True do_convert_rgb = True valid_kwargs = LlavaFastImageProcessorKwargs def __init__(self, **kwargs: Unpack[LlavaFastImageProcessorKwargs]) -> None: super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaFastImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, tuple[int, int, int]]=0) -> 'torch.Tensor': """ Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. """ height, width = get_image_size(images, ChannelDimension.FIRST) if height == width: return images num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels') max_dim = max(height, width) paste_x_left = (max_dim - width) // 2 paste_y_left = (max_dim - height) // 2 paste_x_right = max_dim - width - paste_x_left paste_y_right = max_dim - height - paste_y_left padded_images = F.pad(images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color) return padded_images def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, interpolation: Optional['F.InterpolationMode'], do_pad: bool, do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_pad: stacked_images = self.pad_to_square(images=stacked_images, background_color=tuple((int(x * 255) for x in self.image_mean))) resized_images_grouped[shape] = stacked_images padded_images = reorder_images(resized_images_grouped, grouped_images_index) grouped_images, grouped_images_index = group_images_by_shape(padded_images, 
disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) stacked_images = self.rescale_and_normalize(stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={'pixel_values': processed_images}, tensor_type=return_tensors)
@auto_docstring class LlavaImageProcessorFast(BaseImageProcessorFast): def __init__(self, **kwargs: Unpack[LlavaFastImageProcessorKwargs]) -> None: pass @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaFastImageProcessorKwargs]) -> BatchFeature: pass def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, tuple[int, int, int]]=0) -> 'torch.Tensor': ''' Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. ''' pass def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, interpolation: Optional['F.InterpolationMode'], do_pad: bool, do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: pass
7
1
25
2
19
5
4
0.19
1
12
5
0
4
0
4
38
127
13
96
62
63
18
59
34
54
8
4
2
15
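The fast processor's `_preprocess` repeatedly buckets images by shape with `group_images_by_shape` so that every bucket can be processed as a single batched tensor, then restores the original order with `reorder_images`. A minimal sketch of that idea, assuming PyTorch; `group_by_shape` and `reorder` are hypothetical stand-ins, not the actual transformers helpers.

```python
import torch

def group_by_shape(images):
    """Bucket tensors by shape so each bucket can be stacked and processed in one call."""
    buckets, index = {}, []
    for img in images:
        shape = tuple(img.shape)
        buckets.setdefault(shape, []).append(img)
        index.append((shape, len(buckets[shape]) - 1))
    return {s: torch.stack(v) for s, v in buckets.items()}, index

def reorder(processed, index):
    """Undo the grouping, restoring the original input order."""
    return [processed[shape][pos] for shape, pos in index]

images = [torch.rand(3, 224, 224), torch.rand(3, 336, 336), torch.rand(3, 224, 224)]
grouped, index = group_by_shape(images)
normalized = {s: (batch - 0.5) / 0.5 for s, batch in grouped.items()}  # one vectorized op per bucket
restored = reorder(normalized, index)
assert [tuple(t.shape) for t in restored] == [(3, 224, 224), (3, 336, 336), (3, 224, 224)]
```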
3,405
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/modeling_llava.py
transformers.models.llava.modeling_llava.LlavaCausalLMOutputWithPast
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...cache_utils import Cache import torch from typing import Optional, Union from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from dataclasses import dataclass @dataclass @auto_docstring(custom_intro='\n Base class for Llava causal language model (or autoregressive) outputs.\n ') class LlavaCausalLMOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass @auto_docstring(custom_intro='\n Base class for Llava causal language model (or autoregressive) outputs.\n ') class LlavaCausalLMOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. ''' pass
3
1
0
0
0
0
0
3.57
1
0
0
1
0
0
0
0
37
5
7
7
6
25
7
7
6
0
1
0
0
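Like every `ModelOutput` subclass, the dataclass above supports both attribute and dict-style access, and fields left as `None` are dropped from its key set. A small sketch, assuming `transformers` and `torch` are installed; the tensor shapes are illustrative only.

```python
import torch
from transformers.models.llava.modeling_llava import LlavaCausalLMOutputWithPast

# Normally returned by the model; constructed by hand here just to show the fields.
out = LlavaCausalLMOutputWithPast(
    logits=torch.zeros(1, 5, 32064),                # (batch, seq_len, vocab_size)
    image_hidden_states=torch.zeros(1, 576, 4096),  # (batch, num_image_tokens, hidden)
)
print(out.logits.shape)      # attribute access
print(out["logits"].shape)   # dict-style access
print(list(out.keys()))      # ['logits', 'image_hidden_states'] (None fields are skipped)
```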
3,406
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/modeling_llava.py
transformers.models.llava.modeling_llava.LlavaForConditionalGeneration
from .configuration_llava import LlavaConfig from torch import nn from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...cache_utils import Cache import torch from typing import Optional, Union from ...generation import GenerationMixin @auto_docstring(custom_intro='\n The LLAVA model which consists of a vision backbone and a language model.\n ') class LlavaForConditionalGeneration(LlavaPreTrainedModel, GenerationMixin): _checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_tower': 'model.vision_tower', '^multi_modal_projector': 'model.multi_modal_projector', '^language_model.lm_head': 'lm_head'} _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: LlavaConfig): super().__init__(config) self.model = LlavaModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def get_output_embeddings(self) -> nn.Module: return self.lm_head def set_decoder(self, decoder): self.model.set_decoder(decoder) def get_decoder(self): return self.model.get_decoder() def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs): return self.model.get_image_features(pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, **kwargs) @property def language_model(self): return self.model.language_model @property def vision_tower(self): return self.model.vision_tower @property def multi_modal_projector(self): return self.model.multi_modal_projector @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, image_sizes: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaCausalLMOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, LlavaForConditionalGeneration >>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf") >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf") >>> prompt = "USER: <image>\\nWhat's the content of the image? 
ASSISTANT:" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_new_tokens=15) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \\nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed" ```""" vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, cache_position=cache_position, image_sizes=image_sizes, **kwargs) hidden_states = outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs) return LlavaCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs) if cache_position[0] == 0: model_inputs['pixel_values'] = pixel_values return model_inputs
@auto_docstring(custom_intro='\n The LLAVA model which consists of a vision backbone and a language model.\n ') class LlavaForConditionalGeneration(LlavaPreTrainedModel, GenerationMixin): def __init__(self, config: LlavaConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_output_embeddings(self) -> nn.Module: pass def set_decoder(self, decoder): pass def get_decoder(self): pass def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs): pass @property def language_model(self): pass @property def vision_tower(self): pass @property def multi_modal_projector(self): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, image_sizes: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaCausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, LlavaForConditionalGeneration >>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf") >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf") >>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_new_tokens=15) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed" ```''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): pass
19
1
30
4
20
6
4
0.31
2
11
5
0
11
6
11
12
345
51
225
91
175
70
123
55
111
16
2
2
39
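In `forward`, `logits_to_keep` controls how many trailing positions get an LM head projection: an integer `k` becomes `slice(-k, None)`, and because `-0 == 0` in Python, the default `0` degenerates to `slice(0, None)` and keeps every position. A quick standalone check of that slicing trick:

```python
import torch

hidden_states = torch.randn(2, 10, 8)  # (batch, seq_len, hidden)
for logits_to_keep in (0, 1, 3):
    slice_indices = slice(-logits_to_keep, None)  # -0 == 0, so 0 keeps all positions
    print(logits_to_keep, hidden_states[:, slice_indices, :].shape)
# 0 -> torch.Size([2, 10, 8]); 1 -> torch.Size([2, 1, 8]); 3 -> torch.Size([2, 3, 8])
```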
3,407
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/modeling_llava.py
transformers.models.llava.modeling_llava.LlavaMultiModalProjector
from torch import nn from .configuration_llava import LlavaConfig from ...activations import ACT2FN class LlavaMultiModalProjector(nn.Module): def __init__(self, config: LlavaConfig): super().__init__() num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer) self.linear_1 = nn.Linear(config.vision_config.hidden_size * num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states
class LlavaMultiModalProjector(nn.Module): def __init__(self, config: LlavaConfig): pass def forward(self, image_features): pass
3
0
9
0
9
1
2
0.06
1
3
1
0
2
3
2
12
20
1
18
8
15
1
12
8
9
2
1
0
3
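The projector above is a plain two-layer MLP that maps vision-tower features into the text model's hidden size. A self-contained sketch with illustrative sizes (CLIP-L hidden 1024, 7B text hidden 4096); `nn.GELU()` stands in for `ACT2FN["gelu"]`, and a single feature layer is assumed so `num_feature_layers == 1`.

```python
import torch
from torch import nn

vision_hidden, text_hidden = 1024, 4096  # illustrative sizes
projector = nn.Sequential(
    nn.Linear(vision_hidden, text_hidden),  # linear_1
    nn.GELU(),                              # act
    nn.Linear(text_hidden, text_hidden),    # linear_2
)
image_features = torch.randn(1, 576, vision_hidden)  # 576 patch tokens per image
projected = projector(image_features)
assert projected.shape == (1, 576, text_hidden)
```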
3,408
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/modeling_llava.py
transformers.models.llava.modeling_llava.LlavaPreTrainedModel
from ...modeling_utils import PreTrainedModel from .configuration_llava import LlavaConfig from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging @auto_docstring class LlavaPreTrainedModel(PreTrainedModel): config: LlavaConfig base_model_prefix = '' supports_gradient_checkpointing = True _skip_keys_device_placement = 'past_key_values' _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_flex_attn = True _supports_attention_backend = True
@auto_docstring class LlavaPreTrainedModel(PreTrainedModel): pass
2
0
21
2
16
3
7
0.12
1
0
0
1
1
0
1
1
31
3
25
11
23
3
20
11
18
7
1
2
7
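The class above carries no weights of its own; it declares class-level capability flags that the rest of the library reads when choosing attention backends and compilation paths. They can be inspected without instantiating a model or downloading a checkpoint:

```python
from transformers.models.llava.modeling_llava import LlavaPreTrainedModel

# Class attributes, readable without building the model.
print(LlavaPreTrainedModel.supports_gradient_checkpointing)  # True
print(LlavaPreTrainedModel._supports_flash_attn)             # True
print(LlavaPreTrainedModel._supports_sdpa)                   # True
```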
3,409
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/processing_llava.py
transformers.models.llava.processing_llava.LlavaProcessor
from ...image_utils import ImageInput, get_image_size, to_numpy_array from ...feature_extraction_utils import BatchFeature from ...tokenization_utils_base import PreTokenizedInput, TextInput from typing import Optional, Union import numpy as np from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack class LlavaProcessor(ProcessorMixin): """ Constructs a LLaVa processor which wraps a LLaVa image processor and a LLaMa tokenizer into a single processor. [`LlavaProcessor`] offers all the functionalities of [`LlavaImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information. Args: image_processor ([`LlavaImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. patch_size (`int`, *optional*): Patch size from the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be the same as in the model's config. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. num_additional_image_tokens (`int`, *optional*, defaults to 0): Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other extra tokens appended, no need to set this arg. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = 'AutoImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor=None, tokenizer=None, patch_size=None, vision_feature_select_strategy=None, chat_template=None, image_token='<image>', num_additional_image_tokens=0, **kwargs): self.patch_size = patch_size self.num_additional_image_tokens = num_additional_image_tokens self.vision_feature_select_strategy = vision_feature_select_strategy self.image_token = tokenizer.image_token if hasattr(tokenizer, 'image_token') else image_token self.image_token_id = tokenizer.encode(self.image_token, add_special_tokens=False)[0] super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[LlavaProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if images is None and text is None: raise ValueError('You have to specify at least one of `images` or `text`.') output_kwargs = self._merge_kwargs(LlavaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) if images is not None: image_inputs = self.image_processor(images, **output_kwargs['images_kwargs']) else: image_inputs = {} if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise TypeError('Invalid input text. Please provide a string, or a list of strings') prompt_strings = text if image_inputs.get('pixel_values') is not None: pixel_values = image_inputs['pixel_values'] height, width = get_image_size(to_numpy_array(pixel_values[0])) num_image_tokens = height // self.patch_size * (width // self.patch_size) + self.num_additional_image_tokens if self.vision_feature_select_strategy == 'default': num_image_tokens -= 1 prompt_strings = [] for sample in text: sample = sample.replace(self.image_token, self.image_token * num_image_tokens) prompt_strings.append(sample) return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None) return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False) text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'], return_tensors=None) self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image']) if return_mm_token_type_ids: array_ids = np.array(text_inputs['input_ids']) mm_token_type_ids = np.zeros_like(text_inputs['input_ids']) mm_token_type_ids[array_ids == self.image_token_id] = 1 text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. 
""" vision_data = {} if image_sizes is not None: images_kwargs = LlavaProcessorKwargs._defaults.get('images_kwargs', {}) images_kwargs.update(kwargs) crop_size = images_kwargs.get('crop_size', None) or self.image_processor.crop_size resized_height, resized_width = (crop_size['height'], crop_size['width']) num_image_tokens = resized_height // self.patch_size * (resized_width // self.patch_size) num_image_tokens += self.num_additional_image_tokens if self.vision_feature_select_strategy == 'default': num_image_tokens -= 1 num_image_tokens = [num_image_tokens] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data)
class LlavaProcessor(ProcessorMixin): ''' Constructs a LLaVa processor which wraps a LLaVa image processor and a LLaMa tokenizer into a single processor. [`LlavaProcessor`] offers all the functionalities of [`LlavaImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information. Args: image_processor ([`LlavaImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. patch_size (`int`, *optional*): Patch size from the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be the same as in the model's config. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. num_additional_image_tokens (`int`, *optional*, defaults to 0): Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other extra tokens appended, no need to set this arg. ''' def __init__(self, image_processor=None, tokenizer=None, patch_size=None, vision_feature_select_strategy=None, chat_template=None, image_token='<image>', num_additional_image_tokens=0, **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[LlavaProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. 
''' pass def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): ''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. ''' pass
4
3
22
2
13
8
3
0.87
1
7
2
0
5
4
5
22
156
17
75
42
51
65
43
24
37
8
2
2
13
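The key arithmetic in `__call__` is the placeholder expansion: each `<image>` token is replaced by as many copies as the vision tower produces patch embeddings. A standalone sketch of that computation, assuming the llava-1.5 defaults (336-pixel crops, patch size 14, one extra CLS token); the numbers are illustrative.

```python
height = width = 336
patch_size = 14
num_additional_image_tokens = 1           # e.g. a CLS token added by the vision tower
vision_feature_select_strategy = "default"

num_image_tokens = (height // patch_size) * (width // patch_size) + num_additional_image_tokens
if vision_feature_select_strategy == "default":
    num_image_tokens -= 1                 # CLS is dropped from the vision features

prompt = "USER: <image>\nWhat's in the picture? ASSISTANT:"
expanded = prompt.replace("<image>", "<image>" * num_image_tokens)
print(num_image_tokens)                   # 576 = 24 * 24
assert expanded.count("<image>") == 576
```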
3,410
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava/processing_llava.py
transformers.models.llava.processing_llava.LlavaProcessorKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack class LlavaProcessorKwargs(ProcessingKwargs, total=False): _defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {}}
class LlavaProcessorKwargs(ProcessingKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
7
0
7
2
6
0
2
2
1
0
3
0
0
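`LlavaProcessorKwargs` is a TypedDict-style container (`total=False` makes every key optional) whose class-level `_defaults` seed the per-modality kwargs before caller overrides are applied. A simplified, hypothetical sketch of that merge; the real work happens in `ProcessorMixin._merge_kwargs`, which also mixes in tokenizer init kwargs.

```python
defaults = {
    "text_kwargs": {"padding": False, "return_mm_token_type_ids": False},
    "images_kwargs": {},
}
caller = {"text_kwargs": {"padding": "max_length"}}

# Caller values win; untouched defaults survive.
merged = {m: {**d, **caller.get(m, {})} for m, d in defaults.items()}
print(merged["text_kwargs"])    # {'padding': 'max_length', 'return_mm_token_type_ids': False}
print(merged["images_kwargs"])  # {}
```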
3,411
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/configuration_llava_next.py
transformers.models.llava_next.configuration_llava_next.LlavaNextConfig
from ...configuration_utils import PretrainedConfig from ..auto import CONFIG_MAPPING, AutoConfig class LlavaNextConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`LlavaNextForConditionalGeneration`]. It is used to instantiate an Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 32000): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. image_seq_length (`int`, *optional*, defaults to 576): Sequence length of one image embedding. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. 
Example: ```python >>> from transformers import LlavaNextForConditionalGeneration, LlavaNextConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a Llava-Next llava-hf/llava-v1.6-mistral-7b-hf style configuration >>> configuration = LlavaNextConfig(vision_config, text_config) >>> # Initializing a model from the llava-hf/llava-v1.6-mistral-7b-hf style configuration >>> model = LlavaNextForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'llava_next' attribute_map = {'image_token_id': 'image_token_index'} sub_configs = {'text_config': AutoConfig, 'vision_config': AutoConfig} def __init__(self, vision_config=None, text_config=None, image_token_index=32000, projector_hidden_act='gelu', vision_feature_select_strategy='default', vision_feature_layer=-2, image_grid_pinpoints=None, tie_word_embeddings=False, image_seq_length=576, multimodal_projector_bias=True, **kwargs): self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.image_seq_length = image_seq_length self.multimodal_projector_bias = multimodal_projector_bias if vision_feature_select_strategy not in ['default', 'full']: raise ValueError(f"vision_feature_select_strategy should be one of 'default', 'full'.Got: {vision_feature_select_strategy}") self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]] self.image_grid_pinpoints = image_grid_pinpoints if isinstance(vision_config, dict): vision_config['model_type'] = vision_config.get('model_type', 'clip_vision_model') vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING['clip_vision_model'](intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=336, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, projection_dim=768) self.vision_config = vision_config if isinstance(text_config, dict): text_config['model_type'] = text_config.get('model_type', 'llama') text_config = CONFIG_MAPPING[text_config['model_type']](**text_config) elif text_config is None: text_config = CONFIG_MAPPING['llama']() self.text_config = text_config super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class LlavaNextConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`LlavaNextForConditionalGeneration`]. It is used to instantiate an Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 32000): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. image_seq_length (`int`, *optional*, defaults to 576): Sequence length of one image embedding. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. Example: ```python >>> from transformers import LlavaNextForConditionalGeneration, LlavaNextConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a Llava-Next llava-hf/llava-v1.6-mistral-7b-hf style configuration >>> configuration = LlavaNextConfig(vision_config, text_config) >>> # Initializing a model from the llava-hf/llava-v1.6-mistral-7b-hf style configuration >>> model = LlavaNextForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vision_config=None, text_config=None, image_token_index=32000, projector_hidden_act='gelu', vision_feature_select_strategy='default', vision_feature_layer=-2, image_grid_pinpoints=None, tie_word_embeddings=False, image_seq_length=576, multimodal_projector_bias=True, **kwargs): pass
2
1
64
7
57
0
9
0.82
1
3
0
0
1
10
1
1
127
18
60
28
44
49
26
14
24
9
1
1
9
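As the `__init__` above shows, omitted sub-configs fall back to a CLIP-vision and a Llama config, and `vision_feature_select_strategy` is validated eagerly. A short usage check, assuming a local transformers installation; building the default sub-configs requires no downloads, and `"cls_only"` is just an example of an invalid value.

```python
from transformers import LlavaNextConfig

config = LlavaNextConfig()
print(config.vision_config.model_type)  # "clip_vision_model"
print(config.text_config.model_type)    # "llama"
print(config.image_grid_pinpoints[:2])  # [[336, 672], [672, 336]]

# Invalid feature-selection strategies are rejected at construction time:
try:
    LlavaNextConfig(vision_feature_select_strategy="cls_only")
except ValueError as err:
    print(err)
```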
3,412
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/image_processing_llava_next.py
transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor
import numpy as np from collections.abc import Iterable from ...utils import TensorType, is_vision_available, logging from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, get_size_dict, select_best_resolution from ...image_transforms import PaddingMode, convert_to_rgb, get_resize_output_image_size, pad, resize, to_channel_dimension_format from typing import Optional, Union from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments class LlavaNextImageProcessor(BaseImageProcessor): """ Constructs a LLaVa-NeXT image processor. Based on [`CLIPImageProcessor`] with incorporation of additional techniques for processing high resolution images as explained in the [LLaVa paper](https://huggingface.co/papers/2310.03744). Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. image_grid_pinpoints (`List` *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ['pixel_values', 'image_sizes'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=True, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]] crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') self.do_resize = do_resize self.size = size self.image_grid_pinpoints = image_grid_pinpoints self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_pad = do_pad self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """ Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`) dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected as input. Args: image (`np.ndarray`): The image to pad. padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. 
""" if isinstance(padding, int) or len(padding) != 4: return pad(image, padding, mode, constant_values, data_format, input_data_format) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if mode == PaddingMode.CONSTANT: image = np.pad(image, padding, mode='constant', constant_values=constant_values) elif mode == PaddingMode.REFLECT: image = np.pad(image, padding, mode='reflect') elif mode == PaddingMode.REPLICATE: image = np.pad(image, padding, mode='edge') elif mode == PaddingMode.SYMMETRIC: image = np.pad(image, padding, mode='symmetric') else: raise ValueError(f'Invalid padding mode: {mode}') image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image def _preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Image.Image: """ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. 
input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ images = make_flat_list_of_images(images) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] return images def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array: """ Resizes an image to a target resolution while maintaining aspect ratio. Args: image (np.array): The input image. target_resolution (tuple): The target resolution (height, width) of the image. resample (`PILImageResampling`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: np.array: The resized image. """ new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format) resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format) return resized_image def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): original_height, original_width = original_resolution target_height, target_width = target_resolution paste_x, r_x = divmod(target_width - original_width, 2) paste_y, r_y = divmod(target_height - original_height, 2) return ((paste_y, paste_y + r_y), (paste_x, paste_x + r_x)) def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array: """ Pad an image to a target resolution while maintaining aspect ratio. """ new_resolution = get_patch_output_size(image, target_resolution, input_data_format) padding = self._get_padding_size(new_resolution, target_resolution) padded_image = self.pad(image, padding=padding) return padded_image def get_image_patches(self, image: np.ndarray, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]: """ Process an image with variable resolutions by dividing it into patches. Args: image (np.array): The input image to be processed. grid_pinpoints (List): A list of possible resolutions to select from. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. resample (`PILImageResampling`): Resampling filter to use if resizing the image. data_format (`ChannelDimension` or `str`): The channel dimension format for the output image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. 
Returns: list[np.array]: A list of NumPy arrays containing the processed image patches. """ if not isinstance(grid_pinpoints, list): raise TypeError('grid_pinpoints must be a list of possible resolutions.') possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=input_data_format) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format) patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format) patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches] resized_original_image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format) image_patches = [resized_original_image] + patches return image_patches def _pad_for_batching(self, pixel_values: list[np.ndarray], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): """ Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches. Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`) data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. """ max_patch = max((len(x) for x in pixel_values)) pixel_values = [self.pad(image, padding=((0, max_patch - image.shape[0]), (0, 0), (0, 0), (0, 0)), data_format=data_format, input_data_format=input_data_format) for image in pixel_values] return pixel_values def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None): """ Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. 
size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name='size', default_to_square=False) image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) processed_images = [] image_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images] for image in images: image_patches = self.get_image_patches(image, image_grid_pinpoints, size=(size['shortest_edge'], size['shortest_edge']) if 'shortest_edge' in size else (min(size['height'], size['width']), min(size['height'], size['width'])), patch_size=crop_size['height'], resample=resample, data_format=input_data_format, input_data_format=input_data_format) pixel_values = self._preprocess(image_patches, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) pixel_values = np.array(pixel_values) processed_images.append(pixel_values) if do_pad: processed_images = self._pad_for_batching(processed_images) return BatchFeature(data={'pixel_values': processed_images, 'image_sizes': image_sizes}, tensor_type=return_tensors)
class LlavaNextImageProcessor(BaseImageProcessor): ''' Constructs a LLaVa-NeXT image processor. Based on [`CLIPImageProcessor`] with incorporation of additional techniques for processing high resolution images as explained in the [LLaVa paper](https://huggingface.co/papers/2310.03744). Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. image_grid_pinpoints (`List` *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=True, do_convert_rgb: bool=True, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: ''' Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`) dimension or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected as input. Args: image (`np.ndarray`): The image to pad. padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. ''' pass def _preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Image.Image: ''' Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array: ''' Resizes an image to a target resolution while maintaining aspect ratio. Args: image (np.array): The input image. target_resolution (tuple): The target resolution (height, width) of the image. resample (`PILImageResampling`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: np.array: The resized image. ''' pass def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): pass def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array: ''' Pad an image to a target resolution while maintaining aspect ratio. ''' pass def get_image_patches(self, image: np.ndarray, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]: ''' Process an image with variable resolutions by dividing it into patches. Args: image (np.array): The input image to be processed. grid_pinpoints (List): A list of possible resolutions to select from. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. resample (`PILImageResampling`): Resampling filter to use if resizing the image. data_format (`ChannelDimension` or `str`): The channel dimension format for the output image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: list[np.array]: A list of NumPy arrays containing the processed image patches. ''' pass def _pad_for_batching(self, pixel_values: list[np.ndarray], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): ''' Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches. Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`) data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. 
''' pass def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None): ''' Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
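`select_best_resolution`, imported but not defined in this record, is the step that picks a grid pinpoint inside `get_image_patches`. Below is a plausible re-implementation sketch of the usual LLaVA-NeXT heuristic (maximize the pixels of the original image that survive downscaling, break ties by minimizing wasted area); the upstream function may differ in details.

```python
# Sketch only: `select_best_resolution` lives in transformers'
# image_processing_utils and is not part of this record.
def select_best_resolution_sketch(original_size, possible_resolutions):
    original_height, original_width = original_size
    best_fit = None
    max_effective = 0
    min_wasted = float("inf")
    for height, width in possible_resolutions:
        # Scale that fits the image inside the candidate resolution.
        scale = min(width / original_width, height / original_height)
        downscaled_w = int(original_width * scale)
        downscaled_h = int(original_height * scale)
        # Original pixels that survive, capped at the image's own area.
        effective = min(downscaled_w * downscaled_h, original_width * original_height)
        wasted = (width * height) - effective
        if effective > max_effective or (effective == max_effective and wasted < min_wasted):
            max_effective = effective
            min_wasted = wasted
            best_fit = (height, width)
    return best_fit

print(select_best_resolution_sketch((300, 500), [(336, 672), (672, 336), (672, 672)]))
```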
11
9
61
5
33
23
5
0.85
1
12
3
0
9
13
9
29
610
58
299
133
205
253
124
49
114
21
3
2
49
3,413
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/image_processing_llava_next_fast.py
transformers.models.llava_next.image_processing_llava_next_fast.LlavaNextImageProcessorFast
import torch from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size from ...image_processing_utils import BatchFeature, get_patch_output_size, select_best_resolution from ...utils import TensorType, auto_docstring, is_torchvision_v2_available from ...processing_utils import Unpack from typing import Optional, Union from ...image_processing_utils_fast import BaseImageProcessorFast, DefaultFastImageProcessorKwargs, divide_to_patches, group_images_by_shape, reorder_images @auto_docstring class LlavaNextImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {'shortest_edge': 224} default_to_square = False crop_size = {'height': 224, 'width': 224} do_resize = True do_center_crop = True do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]] valid_kwargs = LlavaNextFastImageProcessorKwargs def __init__(self, **kwargs: Unpack[LlavaNextFastImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaNextFastImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) def _resize_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, interpolation: 'F.InterpolationMode', input_data_format: ChannelDimension) -> 'torch.Tensor': """ Resizes an image to a target resolution while maintaining aspect ratio. Args: image ("torch.Tensor"): The input image. target_resolution (tuple): The target resolution (height, width) of the image. interpolation (`InterpolationMode`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: "torch.Tensor": The resized image. """ new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format) resized_image = self.resize(image=image, size=SizeDict(height=new_height, width=new_width), interpolation=interpolation) return resized_image def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): original_height, original_width = original_resolution target_height, target_width = target_resolution paste_x, r_x = divmod(target_width - original_width, 2) paste_y, r_y = divmod(target_height - original_height, 2) return [paste_x, paste_y, paste_x + r_x, paste_y + r_y] def _pad_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, input_data_format: ChannelDimension) -> 'torch.Tensor': """ Pad an image to a target resolution while maintaining aspect ratio. """ new_resolution = get_patch_output_size(image, target_resolution, input_data_format) padding = self._get_padding_size(new_resolution, target_resolution) padded_image = F.pad(image, padding=padding) return padded_image def _get_image_patches(self, image: 'torch.Tensor', grid_pinpoints, size: tuple, patch_size: int, interpolation: 'F.InterpolationMode') -> list['torch.Tensor']: """ Process an image with variable resolutions by dividing it into patches. Args: image ("torch.Tensor"): The input image to be processed. grid_pinpoints (List): A list of possible resolutions to select from. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. 
interpolation (`"InterpolationMode"`): Resampling filter to use if resizing the image. Returns: list["torch.Tensor"]: A list of tensors containing the processed image patches. """ if not isinstance(grid_pinpoints, list): raise TypeError('grid_pinpoints must be a list of possible resolutions.') possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching(image, best_resolution, interpolation=interpolation, input_data_format=ChannelDimension.FIRST) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=ChannelDimension.FIRST) patches = divide_to_patches(padded_image, patch_size=patch_size) resized_original_image = F.resize(image, size=size, interpolation=interpolation) image_patches = [resized_original_image] + patches return image_patches def _pad_for_batching(self, pixel_values: list['torch.Tensor']) -> list['torch.Tensor']: """ Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`) Returns: list[`torch.Tensor`]: The padded images. """ max_patch = max((len(x) for x in pixel_values)) pixel_values = [torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]]) for image in pixel_values] return pixel_values def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: processed_images = [] image_sizes = [] if size and size.height and size.width: size_tuple = (size.height, size.width) else: size_tuple = (size.shortest_edge, size.shortest_edge) if crop_size and crop_size.height: patch_size = crop_size.height elif size and size.height: patch_size = size.height else: patch_size = size.shortest_edge for image in images: image_patches = self._get_image_patches(image, image_grid_pinpoints, size=size_tuple, patch_size=patch_size, interpolation=interpolation) processed_image_patches_grouped = {} grouped_image_patches, grouped_image_patches_index = group_images_by_shape(image_patches, disable_grouping=disable_grouping) for shape, stacked_image_patches in grouped_image_patches.items(): if do_resize: stacked_image_patches = self.resize(image=stacked_image_patches, size=size, interpolation=interpolation) if do_center_crop: stacked_image_patches = self.center_crop(stacked_image_patches, crop_size) stacked_image_patches = self.rescale_and_normalize(stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std) processed_image_patches_grouped[shape] = stacked_image_patches processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index) processed_image_patches = torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches processed_images.append(processed_image_patches) image_sizes.append(get_image_size(image, ChannelDimension.FIRST)) if do_pad: processed_images = 
self._pad_for_batching(processed_images) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={'pixel_values': processed_images, 'image_sizes': image_sizes}, tensor_type=return_tensors)
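A matching usage sketch for the fast variant, assuming a `transformers` install with torchvision available so that `LlavaNextImageProcessorFast` can be constructed; the commented shapes are illustrative rather than values taken from the source.

```python
import torch

# Hedged usage sketch: assumes transformers exposes the fast processor above.
from transformers import LlavaNextImageProcessorFast

processor = LlavaNextImageProcessorFast()

# A dummy channels-first uint8 image; PIL images are also accepted.
image = torch.randint(0, 256, (3, 300, 500), dtype=torch.uint8)

batch = processor(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # (1, num_patches, 3, 224, 224)
print(batch["image_sizes"])         # original (height, width) per image
```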
@auto_docstring class LlavaNextImageProcessorFast(BaseImageProcessorFast): def __init__(self, **kwargs: Unpack[LlavaNextFastImageProcessorKwargs]): pass @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaNextFastImageProcessorKwargs]) -> BatchFeature: pass def _resize_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, interpolation: 'F.InterpolationMode', input_data_format: ChannelDimension) -> 'torch.Tensor': ''' Resizes an image to a target resolution while maintaining aspect ratio. Args: image ("torch.Tensor"): The input image. target_resolution (tuple): The target resolution (height, width) of the image. interpolation (`InterpolationMode`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: "torch.Tensor": The resized image. ''' pass def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): pass def _pad_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, input_data_format: ChannelDimension) -> 'torch.Tensor': ''' Pad an image to a target resolution while maintaining aspect ratio. ''' pass def _get_image_patches(self, image: 'torch.Tensor', grid_pinpoints, size: tuple, patch_size: int, interpolation: 'F.InterpolationMode') -> list['torch.Tensor']: ''' Process an image with variable resolutions by dividing it into patches. Args: image ("torch.Tensor"): The input image to be processed. grid_pinpoints (List): A list of possible resolutions to select from. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. interpolation (`"InterpolationMode"`): Resampling filter to use if resizing the image. Returns: list["torch.Tensor"]: A list of tensors containing the processed image patches. ''' pass def _pad_for_batching(self, pixel_values: list['torch.Tensor']) -> list['torch.Tensor']: ''' Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`) Returns: list[`torch.Tensor`]: The padded images. ''' pass def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: pass
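The `_get_padding_size` helper above centers the resized image inside the target resolution. A standalone illustration of that arithmetic, using the fast variant's `[left, top, right, bottom]` return order (the slow variant returns `((top, bottom), (left, right))` tuples instead); the example sizes are invented.

```python
from typing import List, Tuple

# divmod splits the slack evenly between the two sides and pushes any odd
# pixel to the right/bottom edge. Order matches torchvision's F.pad.
def get_padding_size(original: Tuple[int, int], target: Tuple[int, int]) -> List[int]:
    original_height, original_width = original
    target_height, target_width = target
    paste_x, r_x = divmod(target_width - original_width, 2)
    paste_y, r_y = divmod(target_height - original_height, 2)
    return [paste_x, paste_y, paste_x + r_x, paste_y + r_y]

# Fitting a 300x501 image into a 336x672 canvas leaves 36 rows and 171
# columns of slack; the odd column lands on the right.
print(get_padding_size((300, 501), (336, 672)))  # [85, 18, 86, 18]
```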
11
4
25
3
16
7
2
0.37
1
13
5
1
8
0
8
42
238
30
152
100
93
56
78
50
69
11
4
3
19
3,414
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/modeling_llava_next.py
transformers.models.llava_next.modeling_llava_next.LlavaNextCausalLMOutputWithPast
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from dataclasses import dataclass import torch from typing import Optional, Union from ...cache_utils import Cache @dataclass @auto_docstring(custom_intro='\n Base class for LlavaNext causal language model (or autoregressive) outputs.\n ') class LlavaNextCausalLMOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None
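A small sketch of how this output dataclass behaves, assuming it is importable from the module path recorded above; `ModelOutput` subclasses support attribute, key, and tuple access, and drop `None` fields when converted to a tuple. The tensor values are dummies.

```python
import torch

# Hedged sketch: assumes the dataclass above is available at this path.
from transformers.models.llava_next.modeling_llava_next import (
    LlavaNextCausalLMOutputWithPast,
)

out = LlavaNextCausalLMOutputWithPast(
    loss=torch.tensor(2.31),
    logits=torch.zeros(1, 10, 32000),  # (batch, seq_len, vocab)
)

print(out.loss)             # attribute access
print(out["logits"].shape)  # dict-style access
print(len(out.to_tuple()))  # only the non-None fields are kept
```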
@dataclass @auto_docstring(custom_intro='\n Base class for LlavaNext causal language model (or autoregressive) outputs.\n ') class LlavaNextCausalLMOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. ''' pass
3
1
0
0
0
0
0
3.57
1
0
0
0
0
0
0
0
37
5
7
7
6
25
7
7
6
0
1
0
0
3,415
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/modeling_llava_next.py
transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration
import torch
from torch import nn
from typing import Optional, Union
from .configuration_llava_next import LlavaNextConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
from ...generation import GenerationMixin
from ...cache_utils import Cache


@auto_docstring(custom_intro='\n The LLAVA-NeXT model which consists of a vision backbone and a language model.\n ')
class LlavaNextForConditionalGeneration(LlavaNextPreTrainedModel, GenerationMixin):
    _checkpoint_conversion_mapping = {
        '^language_model.model': 'model.language_model',
        '^vision_tower': 'model.vision_tower',
        '^multi_modal_projector': 'model.multi_modal_projector',
        '^image_newline': 'model.image_newline',
        '^language_model.lm_head': 'lm_head',
    }
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config: LlavaNextConfig):
        super().__init__(config)
        self.model = LlavaNextModel(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
        return self.model.pack_image_features(image_features=image_features, image_sizes=image_sizes, vision_feature_select_strategy=vision_feature_select_strategy, image_newline=image_newline)

    def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None):
        return self.model.get_image_features(pixel_values=pixel_values, image_sizes=image_sizes, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy)

    @property
    def language_model(self):
        return self.model.language_model

    @property
    def vision_tower(self):
        return self.model.vision_tower

    @property
    def multi_modal_projector(self):
        return self.model.multi_modal_projector

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layer: Optional[Union[int, list[int]]] = None,
        vision_feature_select_strategy: Optional[str] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, LlavaNextCausalLMOutputWithPast]:
        r"""
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision
            features. If `"full"`, the full vision features are used.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in
            `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, LlavaNextForConditionalGeneration

        >>> model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
        >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")

        >>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_length=30)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy

        outputs = self.model(
            input_ids,
            pixel_values=pixel_values,
            image_sizes=image_sizes,
            vision_feature_layer=vision_feature_layer,
            vision_feature_select_strategy=vision_feature_select_strategy,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute logits for the requested positions (all of them when logits_to_keep == 0).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)

        return LlavaNextCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, image_sizes=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
        model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)

        if cache_position[0] == 0:
            # Pixel values are only needed on the prefill step; cached decoding steps reuse the image features.
            model_inputs['pixel_values'] = pixel_values
            model_inputs['image_sizes'] = image_sizes

        return model_inputs

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of
        shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static
                cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in the expected 4D (inverted) form; nothing to do.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            # Block out cache slots that lie beyond each query's cache position.
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory before editing in place
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)

        return causal_mask
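To see what `_prepare_4d_causal_attention_mask_with_cache_position` actually builds, here is a minimal standalone sketch of the same masking arithmetic; the toy sizes and the `demo_causal_mask` wrapper are illustrative only and not part of the library.

import torch

def demo_causal_mask(sequence_length=3, target_length=5, batch_size=2):
    # Same arithmetic as the static method above, inlined with toy inputs.
    dtype = torch.float32
    cache_position = torch.arange(2, 2 + sequence_length)  # decoding positions 2..4
    attention_mask = torch.ones(batch_size, target_length)
    if batch_size > 1:
        attention_mask[1, 0] = 0  # pretend the second sample has one left-padding token
    min_dtype = torch.finfo(dtype).min
    causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
    if sequence_length != 1:
        causal_mask = torch.triu(causal_mask, diagonal=1)
    # Block out cache slots that lie beyond each query's cache position.
    causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
    causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()
    # Fold the 2D padding mask into the 4D causal mask (mask_length == target_length here).
    padding_mask = causal_mask + attention_mask[:, None, None, :] == 0
    return causal_mask.masked_fill(padding_mask, min_dtype)

print(demo_causal_mask().shape)  # torch.Size([2, 1, 3, 5])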
@auto_docstring(custom_intro='\n The LLAVA-NeXT model which consists of a vision backbone and a language model.\n ')
class LlavaNextForConditionalGeneration(LlavaNextPreTrainedModel, GenerationMixin):

    def __init__(self, config: LlavaNextConfig):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    def get_output_embeddings(self) -> nn.Module:
        pass

    def set_decoder(self, decoder):
        pass

    def get_decoder(self):
        pass

    def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
        pass

    def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None):
        pass

    @property
    def language_model(self):
        pass

    @property
    def vision_tower(self):
        pass

    @property
    def multi_modal_projector(self):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, image_sizes: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaNextCausalLMOutputWithPast]:
        '''
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision
            features. If `"full"`, the full vision features are used.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in
            `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, LlavaNextForConditionalGeneration

        >>> model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
        >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")

        >>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_length=30)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)"
        ```'''
        pass

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, image_sizes=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
        pass

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        '''
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of
        shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static
                cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        '''
        pass
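The `logits_to_keep` argument in `forward` above controls how many trailing positions reach the LM head. A short sketch of just that slicing, with toy tensors that are not part of the library:

import torch

hidden_states = torch.randn(1, 7, 16)  # (batch, seq_len, hidden), toy sizes
for logits_to_keep in (0, 1, 3):
    # 0 keeps every position (slice(-0, None) == slice(0, None)); N > 0 keeps the last N.
    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    print(logits_to_keep, hidden_states[:, slice_indices, :].shape)
# 0 torch.Size([1, 7, 16]) / 1 torch.Size([1, 1, 16]) / 3 torch.Size([1, 3, 16])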
22
2
42
4
25
13
4
0.54
2
13
5
0
14
8
14
15
611
62
357
124
290
193
191
74
176
16
2
3
57
3416
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/modeling_llava_next.py
transformers.models.llava_next.modeling_llava_next.LlavaNextMultiModalProjector
from torch import nn
from ...activations import ACT2FN
from .configuration_llava_next import LlavaNextConfig


class LlavaNextMultiModalProjector(nn.Module):

    def __init__(self, config: LlavaNextConfig):
        super().__init__()
        num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer)
        self.linear_1 = nn.Linear(config.vision_config.hidden_size * num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias)
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias)

    def forward(self, image_features):
        hidden_states = self.linear_1(image_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states
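As a shape-flow illustration of the projector above: vision features of width `vision_hidden * num_feature_layers` are mapped to the text model width. The config values below are made up for illustration, and `nn.GELU` stands in for whatever `ACT2FN[config.projector_hidden_act]` resolves to.

import torch
from torch import nn

vision_hidden, text_hidden, num_feature_layers = 32, 48, 2
linear_1 = nn.Linear(vision_hidden * num_feature_layers, text_hidden)
linear_2 = nn.Linear(text_hidden, text_hidden)
act = nn.GELU()  # stand-in activation for this sketch

image_features = torch.randn(5, vision_hidden * num_feature_layers)  # 5 patch tokens
out = linear_2(act(linear_1(image_features)))
print(out.shape)  # torch.Size([5, 48])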
class LlavaNextMultiModalProjector(nn.Module):

    def __init__(self, config: LlavaNextConfig):
        pass

    def forward(self, image_features):
        pass
3
0
9
0
9
1
2
0.06
1
3
1
0
2
3
2
12
20
1
18
8
15
1
12
8
9
2
1
0
3
3417
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/modeling_llava_next.py
transformers.models.llava_next.modeling_llava_next.LlavaNextPreTrainedModel
from ...modeling_utils import PreTrainedModel
from torch import nn
from .configuration_llava_next import LlavaNextConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
import math


@auto_docstring
class LlavaNextPreTrainedModel(PreTrainedModel):
    config: LlavaNextConfig
    base_model_prefix = ''
    supports_gradient_checkpointing = True
    _no_split_modules = ['LlamaDecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LlavaNextModel):
            embed_std = 1 / math.sqrt(self.config.text_config.hidden_size)
            module.image_newline.data.normal_(mean=0.0, std=embed_std)
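A small sketch of the two init rules in `_init_weights` above, applied to toy modules: linears draw from a normal with the config's `initializer_range`, while the `image_newline` embedding uses `1 / sqrt(hidden_size)`. The concrete values here are illustrative only.

import math
import torch
from torch import nn

std = 0.02        # a typical `initializer_range`, assumed for this sketch
hidden_size = 64  # made-up text hidden size

linear = nn.Linear(hidden_size, hidden_size)
linear.weight.data.normal_(mean=0.0, std=std)
if linear.bias is not None:
    linear.bias.data.zero_()

image_newline = torch.empty(hidden_size)
embed_std = 1 / math.sqrt(hidden_size)
image_newline.normal_(mean=0.0, std=embed_std)
print(round(embed_std, 3))  # 0.125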
@auto_docstring
class LlavaNextPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        pass
3
0
21
2
16
3
7
0.12
1
0
0
1
1
0
1
1
31
3
25
11
23
3
20
11
18
7
1
2
7
3418
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/processing_llava_next.py
transformers.models.llava_next.processing_llava_next.LlavaNextProcessor
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...image_utils import ImageInput, get_image_size, to_numpy_array
from ...feature_extraction_utils import BatchFeature
from typing import Optional, Union
from ...image_processing_utils import select_best_resolution
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
import numpy as np


class LlavaNextProcessor(ProcessorMixin):
    """
    Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single
    processor.

    [`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`].
    See the [`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.

    Args:
        image_processor ([`LlavaNextImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
        patch_size (`int`, *optional*):
            Patch size from the vision tower.
        vision_feature_select_strategy (`str`, *optional*):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Should be the same as in the model's config.
        chat_template (`str`, *optional*):
            A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
        image_token (`str`, *optional*, defaults to `"<image>"`):
            Special token used to denote image location.
        num_additional_image_tokens (`int`, *optional*, defaults to 0):
            Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS
            or other extra tokens appended, no need to set this arg.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, patch_size=None, vision_feature_select_strategy=None, chat_template=None, image_token='<image>', num_additional_image_tokens=0, **kwargs):
        self.patch_size = patch_size
        self.num_additional_image_tokens = num_additional_image_tokens
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.image_token = tokenizer.image_token if hasattr(tokenizer, 'image_token') else image_token
        self.image_token_id = tokenizer.image_token_id if getattr(tokenizer, 'image_token_id', None) else tokenizer.convert_tokens_to_ids(self.image_token)
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def __call__(self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[LlavaNextProcessorKwargs]) -> BatchFeature:
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the
        `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not
        `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments
        to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer
        to the docstring of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is
              not `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')

        output_kwargs = self._merge_kwargs(LlavaNextProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
        if images is not None:
            image_inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
        else:
            image_inputs = {}

        if isinstance(text, str):
            text = [text]
        elif not isinstance(text, list) and (not isinstance(text[0], str)):
            raise TypeError('Invalid input text. Please provide a string, or a list of strings')

        prompt_strings = text
        if image_inputs:
            image_sizes = iter(image_inputs['image_sizes'])
            height, width = get_image_size(to_numpy_array(image_inputs['pixel_values'][0][0]))
            prompt_strings = []
            for sample in text:
                # Replace each <image> token with as many placeholders as the image will occupy once encoded.
                while self.image_token in sample:
                    image_size = next(image_sizes)
                    if not isinstance(image_size, (list, tuple)):
                        image_size = image_size.tolist()
                    orig_height, orig_width = image_size
                    num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
                    if self.vision_feature_select_strategy == 'default':
                        num_image_tokens -= 1
                    sample = sample.replace(self.image_token, '<placeholder>' * num_image_tokens, 1)
                prompt_strings.append(sample)
            prompt_strings = [sample.replace('<placeholder>', self.image_token) for sample in prompt_strings]

        return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
        return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', None)
        text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'])
        self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image'])

        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs['input_ids'])
            mm_token_type_ids = np.zeros_like(text_inputs['input_ids'])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)

    def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
        image_grid_pinpoints = self.image_processor.image_grid_pinpoints
        height_best_resolution, width_best_resolution = select_best_resolution([orig_height, orig_width], image_grid_pinpoints)
        scale_height, scale_width = (height_best_resolution // height, width_best_resolution // width)
        patches_height = height // self.patch_size
        patches_width = width // self.patch_size
        unpadded_features, newline_features = self._get_unpadded_features(orig_height, orig_width, patches_height, patches_width, scale_height, scale_width)
        # The base patch covers the full image, while the other patches cover the unpadded crops.
        base_features = patches_height * patches_width + self.num_additional_image_tokens
        num_image_tokens = unpadded_features + newline_features + base_features
        return num_image_tokens

    def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
        """
        Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA because it
        divides each image into patches depending on its resolution. Therefore we need to calculate how many patches
        an image is divided into and get the number of features from that.
        """
        current_height = patches_height * scale_height
        current_width = patches_width * scale_width

        original_aspect_ratio = width / height
        current_aspect_ratio = current_width / current_height
        if original_aspect_ratio > current_aspect_ratio:
            new_height = int(round(height * (current_width / width), 7))
            padding = (current_height - new_height) // 2
            current_height -= padding * 2
        else:
            new_width = int(round(width * (current_height / height), 7))
            padding = (current_width - new_width) // 2
            current_width -= padding * 2

        unpadded_features = current_height * current_width
        newline_features = current_height
        return (unpadded_features, newline_features)

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input
            modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Copy so that per-call kwargs do not mutate the class-level defaults.
            images_kwargs = dict(LlavaNextProcessorKwargs._defaults.get('images_kwargs', {}))
            images_kwargs.update(kwargs)

            size = images_kwargs.get('size', None) or self.image_processor.size
            size = (size['shortest_edge'], size['shortest_edge']) if 'shortest_edge' in size else (min(size['height'], size['width']), min(size['height'], size['width']))
            processed_height, processed_width = size

            batch_num_image_tokens = []
            num_image_patches = [1] * len(image_sizes)
            for image_size in image_sizes:
                orig_height, orig_width = image_size
                num_image_tokens = self._get_number_of_features(orig_height, orig_width, processed_height, processed_width)
                if self.vision_feature_select_strategy == 'default':
                    num_image_tokens -= 1
                batch_num_image_tokens.append(num_image_tokens)
            vision_data.update({'num_image_tokens': batch_num_image_tokens, 'num_image_patches': num_image_patches})
        return MultiModalData(**vision_data)
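A worked example of the `_get_unpadded_features` arithmetic above, with concrete numbers: a 300x600 original image, a 24x24 patch grid scaled 2x in each direction. All values are chosen for illustration; the function below simply inlines the method's math.

def get_unpadded_features(height, width, patches_height, patches_width, scale_height, scale_width):
    current_height = patches_height * scale_height
    current_width = patches_width * scale_width
    original_aspect_ratio = width / height
    current_aspect_ratio = current_width / current_height
    if original_aspect_ratio > current_aspect_ratio:
        # Wide image: the scaled grid is taller than needed, so some rows were padding.
        new_height = int(round(height * (current_width / width), 7))
        padding = (current_height - new_height) // 2
        current_height -= padding * 2
    else:
        new_width = int(round(width * (current_height / height), 7))
        padding = (current_width - new_width) // 2
        current_width -= padding * 2
    return current_height * current_width, current_height

# 48x48 grid, aspect ratio 2.0 > 1.0 -> 12 padding rows trimmed top and bottom
print(get_unpadded_features(300, 600, 24, 24, 2, 2))  # (1152, 24)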
class LlavaNextProcessor(ProcessorMixin):
    '''
    Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single
    processor.

    [`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`].
    See the [`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.

    Args:
        image_processor ([`LlavaNextImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
        patch_size (`int`, *optional*):
            Patch size from the vision tower.
        vision_feature_select_strategy (`str`, *optional*):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Should be the same as in the model's config.
        chat_template (`str`, *optional*):
            A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
        image_token (`str`, *optional*, defaults to `"<image>"`):
            Special token used to denote image location.
        num_additional_image_tokens (`int`, *optional*, defaults to 0):
            Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS
            or other extra tokens appended, no need to set this arg.
    '''

    def __init__(self, image_processor=None, tokenizer=None, patch_size=None, vision_feature_select_strategy=None, chat_template=None, image_token='<image>', num_additional_image_tokens=0, **kwargs):
        pass

    def __call__(self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[LlavaNextProcessorKwargs]) -> BatchFeature:
        '''
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the
        `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not
        `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments
        to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer
        to the docstring of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is
              not `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        '''
        pass

    def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
        pass

    def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
        '''
        Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA because it
        divides each image into patches depending on its resolution. Therefore we need to calculate how many patches
        an image is divided into and get the number of features from that.
        '''
        pass

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        '''
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input
            modalities, along with other useful data.
        '''
        pass
6
4
21
2
14
6
3
0.59
1
9
2
0
7
4
7
24
194
22
109
63
83
64
74
45
66
10
2
4
18
3419
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next/processing_llava_next.py
transformers.models.llava_next.processing_llava_next.LlavaNextProcessorKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack


class LlavaNextProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False},
        'images_kwargs': {'do_pad': True},
    }
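A hedged sketch of how per-call kwargs override the `_defaults` above. The real merge is done by `ProcessorMixin._merge_kwargs` and also folds in tokenizer init kwargs; this only shows the basic dict logic with a made-up call.

defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {'do_pad': True}}
call_kwargs = {'padding': True}  # user asks for padding at call time

merged = {group: dict(opts) for group, opts in defaults.items()}  # copy, don't mutate defaults
for key, value in call_kwargs.items():
    for group in merged:
        if key in merged[group]:
            merged[group][key] = value

print(merged['text_kwargs'])  # {'padding': True, 'return_mm_token_type_ids': False}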
class LlavaNextProcessorKwargs(ProcessingKwargs, total=False):
    pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
9
0
9
2
8
0
2
2
1
0
3
0
0
3420
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/image_processing_llava_next_video.py
transformers.models.llava_next_video.image_processing_llava_next_video.LlavaNextVideoImageProcessor
from ...utils import TensorType, logging
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, validate_preprocess_arguments
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...video_utils import VideoInput, make_batched_videos
from typing import Optional, Union
import numpy as np
from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format

logger = logging.get_logger(__name__)


class LlavaNextVideoImageProcessor(BaseImageProcessor):
    """
    Constructs a LLaVa-NeXT-Video video processor. Based on [`CLIPImageProcessor`] with incorporation of processing
    each video frame.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"],
            with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the
            `preprocess` method.
        image_grid_pinpoints (`List`, *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
            A list of possible resolutions to use for processing high resolution images. The best resolution is
            selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the
            `preprocess` method. Not used for processing videos.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in
            the `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to 224):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale`
            in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    model_input_names = ['pixel_values_videos']

    def __init__(self, do_resize: bool = True, size: Optional[dict[str, int]] = None, image_grid_pinpoints: Optional[list] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.image_grid_pinpoints = image_grid_pinpoints
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        default_to_square = True
        if 'shortest_edge' in size:
            size = size['shortest_edge']
            default_to_square = False
        elif 'height' in size and 'width' in size:
            size = (size['height'], size['width'])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

        output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)
        return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)

    def _preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None) -> list[np.ndarray]:
        """
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.

        Args:
            images (`ImageInput`):
                Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from
                0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"],
                with the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_flat_list_of_images(images)

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')

        if input_data_format is None:
            # We assume that all frames share the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
            if do_normalize:
                image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
            all_images.append(image)

        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]
        return images

    def preprocess(self, images: VideoInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None):
        """
        Args:
            images (`VideoInput`):
                Videos to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255.
                If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the video.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the video after resizing. Shortest edge of the video is resized to size["shortest_edge"],
                with the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the video. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the video.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the video.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the video by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the video.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Frame mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Frame standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the video to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = self.fetch_images(images)
        images = make_batched_videos(images)

        logger.warning('`LlavaNextVideoImageProcessor` is deprecated and will be removed in v5.0. We recommend loading an instance of `LlavaNextVideoVideoProcessor` to process videos for the model.')

        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)

        pixel_values = [self._preprocess(frames, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for frames in images]
        data = {'pixel_values_videos': pixel_values}
        return BatchFeature(data=data, tensor_type=return_tensors)
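A worked sketch of the shortest-edge rule used by `resize` above: a 480x640 frame with size={"shortest_edge": 224} keeps its aspect ratio. This is pure arithmetic that roughly mirrors what `get_resize_output_image_size` computes; the helper name and exact rounding here are illustrative, the library may round slightly differently.

def shortest_edge_output_size(height, width, shortest_edge):
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(shortest_edge * long / short)  # scale the long side proportionally
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)

print(shortest_edge_output_size(480, 640, 224))  # (224, 298)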
class LlavaNextVideoImageProcessor(BaseImageProcessor):
    '''
    Constructs a LLaVa-NeXT-Video video processor. Based on [`CLIPImageProcessor`] with incorporation of processing
    each video frame.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"],
            with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the
            `preprocess` method.
        image_grid_pinpoints (`List`, *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
            A list of possible resolutions to use for processing high resolution images. The best resolution is
            selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the
            `preprocess` method. Not used for processing videos.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in
            the `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to 224):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale`
            in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    '''

    def __init__(self, do_resize: bool = True, size: Optional[dict[str, int]] = None, image_grid_pinpoints: Optional[list] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        pass

    def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        '''
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        '''
        pass

    def _preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None) -> list[np.ndarray]:
        '''
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.

        Args:
            images (`ImageInput`):
                Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from
                0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"],
                with the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        '''
        pass

    def preprocess(self, images: VideoInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None):
        '''
        Args:
            images (`VideoInput`):
                Videos to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255.
                If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the video.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the video after resizing. Shortest edge of the video is resized to size["shortest_edge"],
                with the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the video. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the video.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the video.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the video by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the video.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Frame mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Frame standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the video to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        '''
        pass
5
4
75
5
44
27
7
0.85
1
9
2
0
4
12
4
24
349
24
176
79
115
149
70
23
65
12
3
2
29
3421
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modeling_llava_next_video.py
transformers.models.llava_next_video.modeling_llava_next_video.LlavaNextVideoCausalLMOutputWithPast
from dataclasses import dataclass
import torch
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
from ...cache_utils import Cache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from typing import Optional, Union


@dataclass
@auto_docstring(custom_intro='\n Base class for LlavaNextVideo causal language model (or autoregressive) outputs.\n ')
class LlavaNextVideoCausalLMOutputWithPast(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our
        [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
        video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
    video_hidden_states: Optional[torch.FloatTensor] = None
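A small usage sketch for the output dataclass above, assuming the class is importable; the dummy tensors are illustrative only. `ModelOutput` subclasses allow both attribute access and tuple-style indexing over the fields that are set.

import torch

out = LlavaNextVideoCausalLMOutputWithPast(loss=torch.tensor(1.5), logits=torch.randn(1, 4, 10))
print(out.loss, out.logits.shape)  # tensor(1.5000) torch.Size([1, 4, 10])
print(out[0])                      # first non-None field, here the loss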
@dataclass
@auto_docstring(custom_intro='\n Base class for LlavaNextVideo causal language model (or autoregressive) outputs.\n ')
class LlavaNextVideoCausalLMOutputWithPast(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our
        [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
        video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    '''
    pass
3
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
43
7
8
8
7
28
8
8
7
0
1
0
0
3422
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modeling_llava_next_video.py
transformers.models.llava_next_video.modeling_llava_next_video.LlavaNextVideoForConditionalGeneration
from ...generation import GenerationMixin
from ...cache_utils import Cache
from ...processing_utils import Unpack
from torch import nn
from .configuration_llava_next_video import LlavaNextVideoConfig
from typing import Optional, Union
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging


@auto_docstring(custom_intro='\n The LLAVA-NeXT model which consists of a vision backbone and a language model.\n ')
class LlavaNextVideoForConditionalGeneration(LlavaNextVideoPreTrainedModel, GenerationMixin):
    _checkpoint_conversion_mapping = {
        '^language_model.model': 'model.language_model',
        '^vision_tower': 'model.vision_tower',
        '^multi_modal_projector': 'model.multi_modal_projector',
        '^image_newline': 'model.image_newline',
        '^language_model.lm_head': 'lm_head',
    }
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config: LlavaNextVideoConfig):
        super().__init__(config)
        self.model = LlavaNextVideoModel(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
        return self.model.pack_image_features(image_features=image_features, image_sizes=image_sizes, vision_feature_select_strategy=vision_feature_select_strategy, image_newline=image_newline)

    def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None):
        return self.model.get_image_features(pixel_values=pixel_values, image_sizes=image_sizes, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy)

    @property
    def language_model(self):
        return self.model.language_model

    @property
    def vision_tower(self):
        return self.model.vision_tower

    @property
    def multi_modal_projector(self):
        return self.model.multi_modal_projector

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_values_videos: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layer: Optional[Union[int, list[int]]] = None,
        vision_feature_select_strategy: Optional[str] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, LlavaNextVideoCausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in
            `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> import av
        >>> import numpy as np
        >>> from huggingface_hub import hf_hub_download
        >>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration

        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])

        >>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto")
        >>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")

        >>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
        >>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
        >>> container = av.open(video_path)

        >>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short)
        >>> total_frames = container.streams.video[0].frames
        >>> indices = np.arange(0, total_frames, total_frames / 8).astype(int)
        >>> clip = read_video_pyav(container, indices)
        >>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device)

        >>> # load an image to generate from an image
        >>> prompt = "USER:<image>\nWhat is shown in this image? ASSISTANT:"
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

        >>> # Generate from video
        >>> generate_ids = model.generate(**inputs_video, max_length=50)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "USER:\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)"

        >>> # Generate from image
        >>> generate_ids = model.generate(**inputs_image, max_length=30)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "USER: \nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            vision_feature_layer=vision_feature_layer,
            vision_feature_select_strategy=vision_feature_select_strategy,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            image_sizes=image_sizes,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute logits for the requested positions (all of them when logits_to_keep == 0).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)

        return LlavaNextVideoCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
            video_hidden_states=outputs.video_hidden_states,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_values_videos=None, image_sizes=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
        model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)

        if cache_position[0] == 0:
            # Pixel values are only needed on the prefill step; cached decoding steps reuse the visual features.
            model_inputs['pixel_values'] = pixel_values
            model_inputs['pixel_values_videos'] = pixel_values_videos
            model_inputs['image_sizes'] = image_sizes

        return model_inputs

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of
        shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static
                cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): return self.model.get_video_features(pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy)
@auto_docstring(custom_intro='\n The LLAVA-NeXT model which consists of a vision backbone and a language model.\n ') class LlavaNextVideoForConditionalGeneration(LlavaNextVideoPreTrainedModel, GenerationMixin): def __init__(self, config: LlavaNextVideoConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_output_embeddings(self) -> nn.Module: pass def set_decoder(self, decoder): pass def get_decoder(self): pass def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None): pass def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): pass @property def language_model(self): pass @property def vision_tower(self): pass @property def multi_modal_projector(self): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaNextVideoCausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import av >>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto") >>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf") >>> prompt = "USER: <video>\nWhy is this video funny? 
ASSISTANT:" >>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset") >>> container = av.open(video_path) >>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short) >>> total_frames = container.streams.video[0].frames >>> indices = np.arange(0, total_frames, total_frames / 8).astype(int) >>> clip = read_video_pyav(container, indices) >>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device) >>> # load an image to generate from an image >>> prompt = "USER:<image>\nWhat is shown in this image? ASSISTANT:" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device) >>> # Generate from video >>> generate_ids = model.generate(**inputs_video, max_length=50) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER:\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)" >>> # Generate from image >>> generate_ids = model.generate(**inputs_image, max_length=30) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)" ```''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_values_videos=None, image_sizes=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): pass
23
2
47
4
27
16
4
0.59
2
14
6
0
15
11
15
16
718
69
408
146
330
242
222
86
206
19
2
3
64
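A note on the record above: the static `_prepare_4d_causal_attention_mask_with_cache_position` builds the mask in three steps (causal triangle, cache-position gating, padding merge). Below is a minimal standalone sketch of that same logic, assuming only `torch`; the toy batch/sequence sizes are illustrative and not taken from the record.

```python
import torch

def build_causal_mask(attention_mask, sequence_length, target_length, dtype, cache_position, batch_size):
    min_dtype = torch.finfo(dtype).min
    # Start fully masked, then open the causal lower triangle.
    causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
    if sequence_length != 1:
        causal_mask = torch.triu(causal_mask, diagonal=1)
    # Key positions beyond the current cache position stay masked.
    causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
    causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
    if attention_mask is not None:
        # Merge in the 2D padding mask over the prefix it covers.
        causal_mask = causal_mask.clone()
        mask_length = attention_mask.shape[-1]
        padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
        causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
            padding_mask == 0, min_dtype
        )
    return causal_mask

# Hypothetical sizes: decoding 3 new tokens with 2 already cached, static cache of 5.
mask = build_causal_mask(
    attention_mask=torch.ones(1, 5),
    sequence_length=3,
    target_length=5,
    dtype=torch.float32,
    cache_position=torch.arange(2, 5),
    batch_size=1,
)
print(mask.shape)  # torch.Size([1, 1, 3, 5])
```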
3,423
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modeling_llava_next_video.py
transformers.models.llava_next_video.modeling_llava_next_video.LlavaNextVideoMultiModalProjector
from .configuration_llava_next_video import LlavaNextVideoConfig from ...activations import ACT2FN from torch import nn class LlavaNextVideoMultiModalProjector(nn.Module): def __init__(self, config: LlavaNextVideoConfig): super().__init__() num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer) self.linear_1 = nn.Linear(config.vision_config.hidden_size * num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states
class LlavaNextVideoMultiModalProjector(nn.Module): def __init__(self, config: LlavaNextVideoConfig): pass def forward(self, image_features): pass
3
0
9
0
9
1
2
0.06
1
3
1
0
2
3
2
12
20
1
18
8
15
1
12
8
9
2
1
0
3
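The projector record above is a two-layer MLP that lifts vision features into the language model's embedding width. A minimal equivalent sketch, with hypothetical sizes standing in for `vision_config.hidden_size` and `text_config.hidden_size` (and `nn.GELU` standing in for `ACT2FN["gelu"]`):

```python
import torch
from torch import nn

# Illustrative dimensions; the real values come from the model config.
vision_hidden, text_hidden, num_patches = 1024, 4096, 576

projector = nn.Sequential(
    nn.Linear(vision_hidden, text_hidden),  # linear_1
    nn.GELU(),                              # ACT2FN["gelu"]
    nn.Linear(text_hidden, text_hidden),    # linear_2
)

image_features = torch.randn(2, num_patches, vision_hidden)  # (batch, patches, vision dim)
projected = projector(image_features)
print(projected.shape)  # torch.Size([2, 576, 4096]) -- now in the LM's embedding space
```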
3,424
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modeling_llava_next_video.py
transformers.models.llava_next_video.modeling_llava_next_video.LlavaNextVideoPooler
import math from torch import nn class LlavaNextVideoPooler(nn.Module): def __init__(self, config): super().__init__() mode = config.spatial_pool_mode stride = config.spatial_pool_stride out_channels = getattr(config, 'spatial_pool_out_channels', config.vision_config.hidden_size) self.image_size = (config.vision_config.image_size // config.vision_config.patch_size) ** 2 if mode == 'average': self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride) elif mode == 'max': self.pool = nn.MaxPool2d(kernel_size=stride, stride=stride) elif mode == 'conv': self.pool = nn.Conv2d(in_channels=config.vision_config.hidden_size, out_channels=out_channels, kernel_size=stride, stride=stride) else: raise ValueError(f'Unknown pooling mode: {mode}. Has to be one of [`average`, `max`, `conv`]') def forward(self, image_features): ori_width = int(math.sqrt(image_features.shape[1] * self.image_size // self.image_size)) ori_height = int(ori_width * self.image_size // self.image_size) batch_size, _, dim = image_features.shape image_features_spatial = image_features.view(batch_size, ori_height, ori_height, dim).permute(0, 3, 1, 2) image_features_spatial_pool = self.pool(image_features_spatial) return image_features_spatial_pool.flatten(2).transpose(1, 2).contiguous()
class LlavaNextVideoPooler(nn.Module): def __init__(self, config): pass def forward(self, image_features): pass
3
0
15
2
13
0
3
0
1
3
0
0
2
2
2
12
32
5
27
13
24
0
19
13
16
4
1
1
5
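In the pooler's `forward` above, `image_features.shape[1] * self.image_size // self.image_size` cancels to `image_features.shape[1]`, so the grid side is simply the square root of the token count. A sketch of the shape flow for `spatial_pool_mode='average'`, with illustrative sizes (a 24x24 grid, i.e. a 336px image with 14px patches, and stride 2):

```python
import math
import torch
from torch import nn

num_tokens, dim, stride = 576, 1024, 2
side = int(math.sqrt(num_tokens))  # 24; the forward above reduces to sqrt(num_tokens)

frames = torch.randn(8, num_tokens, dim)                       # (num_frames, tokens, dim)
spatial = frames.view(8, side, side, dim).permute(0, 3, 1, 2)  # (frames, dim, H, W)
pooled = nn.AvgPool2d(kernel_size=stride, stride=stride)(spatial)
tokens = pooled.flatten(2).transpose(1, 2)                     # back to (frames, tokens, dim)
print(tokens.shape)  # torch.Size([8, 144, 1024]) -- stride 2 leaves 4x fewer tokens per frame
```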
3,425
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modeling_llava_next_video.py
transformers.models.llava_next_video.modeling_llava_next_video.LlavaNextVideoPreTrainedModel
from .configuration_llava_next_video import LlavaNextVideoConfig from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging import math from torch import nn from ...modeling_utils import PreTrainedModel @auto_docstring class LlavaNextVideoPreTrainedModel(PreTrainedModel): config: LlavaNextVideoConfig base_model_prefix = '' supports_gradient_checkpointing = True _no_split_modules = ['LlamaDecoderLayer'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module): std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, LlavaNextVideoModel): embed_std = 1 / math.sqrt(self.config.text_config.hidden_size) module.image_newline.data.normal_(mean=0.0, std=embed_std)
@auto_docstring class LlavaNextVideoPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
21
2
16
3
7
0.12
1
0
0
1
1
0
1
1
31
3
25
11
23
3
20
11
18
7
1
2
7
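For the `_init_weights` record above, a small worked sketch of the two init paths, with a hypothetical `hidden_size` and `initializer_range` standing in for the config values: linear layers get `N(0, std)` with zeroed bias, while `image_newline` uses a std scaled down by `1/sqrt(hidden_size)`.

```python
import math
import torch
from torch import nn

hidden_size, std = 4096, 0.02  # illustrative; std normally comes from config.initializer_range

linear = nn.Linear(hidden_size, hidden_size)
linear.weight.data.normal_(mean=0.0, std=std)
if linear.bias is not None:
    linear.bias.data.zero_()

# image_newline gets a smaller, width-scaled std: 1 / sqrt(4096) = 0.015625
image_newline = torch.empty(hidden_size)
embed_std = 1 / math.sqrt(hidden_size)
image_newline.data.normal_(mean=0.0, std=embed_std)
print(round(embed_std, 6))  # 0.015625
```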
3,426
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modular_llava_next_video.py
transformers.models.llava_next_video.modular_llava_next_video.LlavaNextVideoCausalLMOutputWithPast
from typing import Optional, Union from transformers.models.llava_next.modeling_llava_next import LlavaNextCausalLMOutputWithPast, LlavaNextForConditionalGeneration, LlavaNextModel, LlavaNextModelOutputWithPast, LlavaNextMultiModalProjector, TransformersKwargs, image_size_to_num_patches import torch class LlavaNextVideoCausalLMOutputWithPast(LlavaNextCausalLMOutputWithPast): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. video_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`. video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ video_hidden_states: Optional[torch.FloatTensor] = None
class LlavaNextVideoCausalLMOutputWithPast(LlavaNextCausalLMOutputWithPast): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. video_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`. video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. ''' pass
1
1
0
0
0
0
0
2.5
1
0
0
0
0
0
0
0
8
1
2
2
1
5
2
2
1
0
2
0
0
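The output record above adds a single field on top of its LlavaNext parent. A toy stand-in (the real class subclasses `LlavaNextCausalLMOutputWithPast`, a `ModelOutput` dataclass) just to show where `video_hidden_states` sits relative to the usual LM fields; the tensor shapes are hypothetical:

```python
from dataclasses import dataclass
from typing import Optional
import torch

@dataclass
class ToyLlavaNextVideoOutput:
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
    video_hidden_states: Optional[torch.FloatTensor] = None  # the one extra slot

out = ToyLlavaNextVideoOutput(
    logits=torch.randn(1, 10, 32064),
    video_hidden_states=torch.randn(8, 144, 4096),
)
print(out.video_hidden_states.shape)  # videos get their own hidden-state slot
```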
3,427
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modular_llava_next_video.py
transformers.models.llava_next_video.modular_llava_next_video.LlavaNextVideoConfig
from ..auto import CONFIG_MAPPING, AutoConfig from ...configuration_utils import PretrainedConfig class LlavaNextVideoConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`LlavaNextVideoForConditionalGeneration`]. It is used to instantiate an Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 32001): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. video_token_index (`int`, *optional*, defaults to 32000): The video token index to encode the image prompt. spatial_pool_mode (`str`, *optional*, defaults to `"average"`): Pooling mode to use for videos. Can be "average", "max" or "conv". spatial_pool_stride (`int`, *optional*, defaults to 2): Stride used in the pooling layer for videos. image_seq_length (`int`, *optional*, defaults to 576): Sequence length of one image embedding. video_seq_length (`int`, *optional*, defaults to 288): Sequence length of one video embedding. 
Example: ```python >>> from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> configuration = LlavaNextVideoConfig(vision_config, text_config) >>> model = LlavaNextVideoForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'llava_next_video' attribute_map = {'image_token_id': 'image_token_index', 'video_token_id': 'video_token_index'} sub_configs = {'text_config': AutoConfig, 'vision_config': AutoConfig} def __init__(self, vision_config=None, text_config=None, image_token_index=32001, projector_hidden_act='gelu', multimodal_projector_bias=True, vision_feature_select_strategy='default', vision_feature_layer=-2, image_grid_pinpoints=None, tie_word_embeddings=False, video_token_index=32000, spatial_pool_mode='average', spatial_pool_stride=2, image_seq_length=576, video_seq_length=288, **kwargs): self.video_token_index = video_token_index self.spatial_pool_mode = spatial_pool_mode self.spatial_pool_stride = spatial_pool_stride self.image_seq_length = image_seq_length self.video_seq_length = video_seq_length self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.multimodal_projector_bias = multimodal_projector_bias if vision_feature_select_strategy not in ['default', 'full']: raise ValueError(f"vision_feature_select_strategy should be one of 'default', 'full'. Got: {vision_feature_select_strategy}") self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]] self.image_grid_pinpoints = image_grid_pinpoints if isinstance(vision_config, dict): vision_config['model_type'] = vision_config.get('model_type', 'clip_vision_model') vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING['clip_vision_model'](intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=336, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, projection_dim=768) self.vision_config = vision_config if isinstance(text_config, dict): text_config['model_type'] = text_config.get('model_type', 'llama') text_config = CONFIG_MAPPING[text_config['model_type']](**text_config) elif text_config is None: text_config = CONFIG_MAPPING['llama']() self.text_config = text_config super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class LlavaNextVideoConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`LlavaNextVideoForConditionalGeneration`]. It is used to instantiate an Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 32001): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. video_token_index (`int`, *optional*, defaults to 32000): The video token index to encode the image prompt. spatial_pool_mode (`str`, *optional*, defaults to `"average"`): Pooling mode to use for videos. Can be "average", "max" or "conv". spatial_pool_stride (`int`, *optional*, defaults to 2): Stride used in the pooling layer for videos. image_seq_length (`int`, *optional*, defaults to 576): Sequence length of one image embedding. video_seq_length (`int`, *optional*, defaults to 288): Sequence length of one video embedding. 
Example: ```python >>> from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> configuration = LlavaNextVideoConfig(vision_config, text_config) >>> model = LlavaNextVideoForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vision_config=None, text_config=None, image_token_index=32001, projector_hidden_act='gelu', multimodal_projector_bias=True, vision_feature_select_strategy='default', vision_feature_layer=-2, image_grid_pinpoints=None, tie_word_embeddings=False, video_token_index=32000, spatial_pool_mode='average', spatial_pool_stride=2, image_seq_length=576, video_seq_length=288, **kwargs): pass
2
1
72
7
65
0
9
0.81
1
3
0
0
1
14
1
33
140
17
68
36
48
55
30
18
28
9
2
1
9
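A short usage note on the config record above: `vision_config`/`text_config` may be passed as plain dicts, and `model_type` is filled in with `'clip_vision_model'`/`'llama'` when missing. A sketch, assuming a transformers release that ships `LlavaNextVideoConfig`:

```python
from transformers import LlavaNextVideoConfig

# Sub-configs as plain dicts; model_type defaults per the __init__ above.
config = LlavaNextVideoConfig(
    vision_config={"hidden_size": 1024, "patch_size": 14, "image_size": 336},
    text_config={"model_type": "llama", "hidden_size": 4096},
)
print(type(config.vision_config).__name__)  # CLIPVisionConfig
print(config.image_grid_pinpoints[0])       # [336, 672] -- the default pinpoints
```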
3,428
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modular_llava_next_video.py
transformers.models.llava_next_video.modular_llava_next_video.LlavaNextVideoForConditionalGeneration
from ...processing_utils import Unpack import torch from typing import Optional, Union from transformers.models.llava_next.modeling_llava_next import LlavaNextCausalLMOutputWithPast, LlavaNextForConditionalGeneration, LlavaNextModel, LlavaNextModelOutputWithPast, LlavaNextMultiModalProjector, TransformersKwargs, image_size_to_num_patches from ...cache_utils import Cache class LlavaNextVideoForConditionalGeneration(LlavaNextForConditionalGeneration): def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): return self.model.get_video_features(pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaNextVideoCausalLMOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import av >>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto") >>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf") >>> prompt = "USER: <video>\\nWhy is this video funny? 
ASSISTANT:" >>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset") >>> container = av.open(video_path) >>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short) >>> total_frames = container.streams.video[0].frames >>> indices = np.arange(0, total_frames, total_frames / 8).astype(int) >>> clip = read_video_pyav(container, indices) >>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device) >>> # load an image to generate from an image >>> prompt = "USER:<image>\\nWhat is shown in this image? ASSISTANT:" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device) >>> # Generate from video >>> generate_ids = model.generate(**inputs_video, max_length=50) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER:\\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)" >>> # Generate from image >>> generate_ids = model.generate(**inputs_image, max_length=30) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \\nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)" ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, image_sizes=image_sizes, **kwargs) hidden_states = outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs) return LlavaNextVideoCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, video_hidden_states=outputs.video_hidden_states) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_values_videos=None, image_sizes=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): model_inputs = 
super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs) if cache_position[0] == 0: model_inputs['pixel_values'] = pixel_values model_inputs['pixel_values_videos'] = pixel_values_videos model_inputs['image_sizes'] = image_sizes return model_inputs
class LlavaNextVideoForConditionalGeneration(LlavaNextForConditionalGeneration): def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaNextVideoCausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import av >>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto") >>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf") >>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:" >>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset") >>> container = av.open(video_path) >>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short) >>> total_frames = container.streams.video[0].frames >>> indices = np.arange(0, total_frames, total_frames / 8).astype(int) >>> clip = read_video_pyav(container, indices) >>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device) >>> # load an image to generate from an image >>> prompt = "USER:<image>\nWhat is shown in this image? 
ASSISTANT:" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device) >>> # Generate from video >>> generate_ids = model.generate(**inputs_video, max_length=50) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER:\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)" >>> # Generate from image >>> generate_ids = model.generate(**inputs_image, max_length=30) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)" ```''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_values_videos=None, image_sizes=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): pass
4
1
72
7
42
23
6
0.54
1
10
3
0
5
3
5
22
364
37
212
79
163
115
97
36
91
19
3
2
32
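The `prepare_inputs_for_generation` override in the record above forwards pixel inputs only when `cache_position[0] == 0`. A self-contained sketch of that gate (the helper name and toy inputs are hypothetical): on the prefill step the vision towers must run, while later decode steps reuse the KV cache and need text inputs only.

```python
import torch

def attach_vision_inputs(model_inputs, cache_position, pixel_values, pixel_values_videos):
    if cache_position[0] == 0:  # prefill: forward the pixel inputs once
        model_inputs["pixel_values"] = pixel_values
        model_inputs["pixel_values_videos"] = pixel_values_videos
    return model_inputs

prefill = attach_vision_inputs({}, torch.arange(0, 12), "pix", "vid")
decode = attach_vision_inputs({}, torch.arange(12, 13), "pix", "vid")
print(sorted(prefill))  # ['pixel_values', 'pixel_values_videos']
print(sorted(decode))   # [] -- decode steps carry no pixel inputs
```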
3,429
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/modular_llava_next_video.py
transformers.models.llava_next_video.modular_llava_next_video.LlavaNextVideoPooler
from torch import nn import math class LlavaNextVideoPooler(nn.Module): def __init__(self, config): super().__init__() mode = config.spatial_pool_mode stride = config.spatial_pool_stride out_channels = getattr(config, 'spatial_pool_out_channels', config.vision_config.hidden_size) self.image_size = (config.vision_config.image_size // config.vision_config.patch_size) ** 2 if mode == 'average': self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride) elif mode == 'max': self.pool = nn.MaxPool2d(kernel_size=stride, stride=stride) elif mode == 'conv': self.pool = nn.Conv2d(in_channels=config.vision_config.hidden_size, out_channels=out_channels, kernel_size=stride, stride=stride) else: raise ValueError(f'Unknown pooling mode: {mode}. Has to be one of [`average`, `max`, `conv`]') def forward(self, image_features): ori_width = int(math.sqrt(image_features.shape[1] * self.image_size // self.image_size)) ori_height = int(ori_width * self.image_size // self.image_size) batch_size, _, dim = image_features.shape image_features_spatial = image_features.view(batch_size, ori_height, ori_height, dim).permute(0, 3, 1, 2) image_features_spatial_pool = self.pool(image_features_spatial) return image_features_spatial_pool.flatten(2).transpose(1, 2).contiguous()
class LlavaNextVideoPooler(nn.Module): def __init__(self, config): pass def forward(self, image_features): pass
3
0
15
2
13
0
3
0
1
3
0
0
2
2
2
12
32
5
27
13
24
0
19
13
16
4
1
1
5
3,430
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_next_video/processing_llava_next_video.py
transformers.models.llava_next_video.processing_llava_next_video.LlavaNextVideoProcessor
from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...image_processing_utils import select_best_resolution from ...video_utils import VideoInput import numpy as np from ...image_utils import ImageInput, get_image_size, to_numpy_array from typing import Optional, Union from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from ...feature_extraction_utils import BatchFeature class LlavaNextVideoProcessor(ProcessorMixin): """ Constructs a LLaVa-NeXT-Video processor which wraps a LLaVa-NeXT image processor, LLaVa-NeXT-Video video processor and a LLaMa tokenizer into a single processor. [`LlavaNextVideoProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`], [`LlavaNextVideoImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaNextVideoProcessor.__call__`] and [`~LlavaNextVideoProcessor.decode`] for more information. Args: video_processor ([`LlavaNextVideoVideoProcessor`], *optional*): The video processor is a required input. image_processor ([`LlavaNextImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): Jinja chat template that will be used in tokenizer's `apply_chat_template` patch_size (`int`, *optional*): Patch size from the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be same as in model's config video_token (`str`, *optional*, defaults to `"<video>"`): Special token used to denote video location. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. num_additional_image_tokens (`int`, *optional*, defaults to 0): Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other extra tokens appended, no need to set this arg. 
""" attributes = ['video_processor', 'image_processor', 'tokenizer'] image_processor_class = ('LlavaNextImageProcessor', 'LlavaNextImageProcessorFast') video_processor_class = 'AutoVideoProcessor' tokenizer_class = ('LlamaTokenizer', 'LlamaTokenizerFast') def __init__(self, video_processor=None, image_processor=None, tokenizer=None, chat_template=None, patch_size=None, vision_feature_select_strategy=None, video_token='<video>', image_token='<image>', num_additional_image_tokens=0, **kwargs): self.patch_size = patch_size self.num_additional_image_tokens = num_additional_image_tokens self.vision_feature_select_strategy = vision_feature_select_strategy self.image_token = tokenizer.image_token if hasattr(tokenizer, 'image_token') else image_token self.video_token = tokenizer.video_token if hasattr(tokenizer, 'video_token') else video_token self.image_token_id = tokenizer.image_token_id if getattr(tokenizer, 'image_token_id', None) else tokenizer.convert_tokens_to_ids(self.image_token) self.video_token_id = tokenizer.video_token_id if getattr(tokenizer, 'video_token_id', None) else tokenizer.convert_tokens_to_ids(self.video_token) super().__init__(video_processor, image_processor, tokenizer, chat_template=chat_template) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos: Optional[VideoInput]=None, **kwargs: Unpack[LlavaNextVideoProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. To prepare the video(s), this method forwards the `videos` and `kwargs` arguments to LlavaNextVideoImageProcessor's [`~LlavaNextVideoImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. 
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs(LlavaNextVideoProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) if images is not None: image_inputs = self.image_processor(images, **output_kwargs['images_kwargs']) else: image_inputs = {} if videos is not None: videos_inputs = self.video_processor(videos, **output_kwargs['videos_kwargs']) else: videos_inputs = {} if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise TypeError('Invalid input text. Please provide a string, or a list of strings') if image_inputs: image_sizes = iter(image_inputs['image_sizes']) height, width = get_image_size(to_numpy_array(image_inputs['pixel_values'][0][0])) prompt_strings = [] for sample in text: while self.image_token in sample: image_size = next(image_sizes) if not isinstance(image_size, (list, tuple)): image_size = image_size.tolist() orig_height, orig_width = image_size num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width) if self.vision_feature_select_strategy == 'default': num_image_tokens -= 1 sample = sample.replace(self.image_token, '<placeholder>' * num_image_tokens, 1) prompt_strings.append(sample) text = [sample.replace('<placeholder>', self.image_token) for sample in prompt_strings] if videos_inputs: one_video = videos_inputs.get('pixel_values_videos')[0] if isinstance(one_video, (list, tuple)): one_video = np.array(one_video) else: one_video = to_numpy_array(one_video) height, width = get_image_size(one_video[0]) num_frames = one_video.shape[0] num_image_tokens = height // self.patch_size * (width // self.patch_size) num_video_tokens = num_image_tokens // 4 * num_frames prompt_strings = [] for sample in text: sample = sample.replace(self.video_token, self.video_token * num_video_tokens) prompt_strings.append(sample) text = prompt_strings return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None) text_inputs = self.tokenizer(text, **output_kwargs['text_kwargs']) self._check_special_mm_tokens(text, text_inputs, modalities=['image', 'video']) return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors) def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int: image_grid_pinpoints = self.image_processor.image_grid_pinpoints height_best_resolution, width_best_resolution = select_best_resolution([orig_height, orig_width], image_grid_pinpoints) scale_height, scale_width = (height_best_resolution // height, width_best_resolution // width) patches_height = height // self.patch_size patches_width = width // self.patch_size unpadded_features, newline_features = self._get_unpadded_features(orig_height, orig_width, patches_height, patches_width, scale_height, scale_width) base_features = patches_height * patches_width + self.num_additional_image_tokens num_image_tokens = unpadded_features + newline_features + base_features return num_image_tokens def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width): """ Get number of features for a given image with height/width. 
LLaVA-NeXT is different from LLaVA because it divided each image into patches depending on its resolution. Therefore we need to calculate how many patches an image is divided into and get the number of features from that. """ current_height = patches_height * scale_height current_width = patches_width * scale_width original_aspect_ratio = width / height current_aspect_ratio = current_width / current_height if original_aspect_ratio > current_aspect_ratio: new_height = int(round(height * (current_width / width), 7)) padding = (current_height - new_height) // 2 current_height -= padding * 2 else: new_width = int(round(width * (current_height / height), 7)) padding = (current_width - new_width) // 2 current_width -= padding * 2 unpadded_features = current_height * current_width newline_features = current_height return (unpadded_features, newline_features) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: images_kwargs = LlavaNextVideoProcessorKwargs._defaults.get('images_kwargs', {}) images_kwargs.update(kwargs) size = images_kwargs.get('size', None) or self.image_processor.size size = (size['shortest_edge'], size['shortest_edge']) if 'shortest_edge' in size else (min(size['height'], size['width']), min(size['height'], size['width'])) processed_height, processed_width = size batch_num_image_tokens = [] num_image_patches = [1] * len(image_sizes) for image_size in image_sizes: orig_height, orig_width = image_size num_image_tokens = self._get_number_of_features(orig_height, orig_width, processed_height, processed_width) if self.vision_feature_select_strategy == 'default': num_image_tokens -= 1 batch_num_image_tokens.append(num_image_tokens) vision_data.update({'num_image_tokens': batch_num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data)
class LlavaNextVideoProcessor(ProcessorMixin): ''' Constructs a LLaVa-NeXT-Video processor which wraps a LLaVa-NeXT image processor, LLaVa-NeXT-Video video processor and a LLaMa tokenizer into a single processor. [`LlavaNextVideoProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`], [`LlavaNextVideoImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaNextVideoProcessor.__call__`] and [`~LlavaNextVideoProcessor.decode`] for more information. Args: video_processor ([`LlavaNextVideoVideoProcessor`], *optional*): The video processor is a required input. image_processor ([`LlavaNextImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): Jinja chat template that will be used in tokenizer's `apply_chat_template` patch_size (`int`, *optional*): Patch size from the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be same as in model's config video_token (`str`, *optional*, defaults to `"<video>"`): Special token used to denote video location. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. num_additional_image_tokens (`int`, *optional*, defaults to 0): Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other extra tokens appended, no need to set this arg. ''' def __init__(self, video_processor=None, image_processor=None, tokenizer=None, chat_template=None, patch_size=None, vision_feature_select_strategy=None, video_token='<video>', image_token='<image>', num_additional_image_tokens=0, **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos: Optional[VideoInput]=None, **kwargs: Unpack[LlavaNextVideoProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. To prepare the video(s), this method forwards the `videos` and `kwargs` arguments to LlavaNextVideoImageProcessor's [`~LlavaNextVideoImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The video or batch of videos to be prepared. 
Each video can be a 4D NumPy array or PyTorch tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. '''
pass
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
pass
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
''' Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA because it divides each image into patches depending on its resolution. Therefore we need to calculate how many patches an image is divided into and get the number of features from that. '''
pass
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args: image_sizes (list[list[int]], *optional*): The input sizes formatted as (height, width) per each image.
Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. '''
pass
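A short usage sketch of the processor interface described above. The checkpoint name and the exact output keys are assumptions (any LLaVA-NeXT-Video checkpoint with a bundled processor config should behave similarly), and the random frames stand in for a real decoded video.

```python
import numpy as np
from transformers import AutoProcessor

# Checkpoint name is an assumption; substitute your own LLaVA-NeXT-Video checkpoint.
processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")

# 8 random RGB frames stand in for a decoded video clip.
video = np.random.randint(0, 256, (8, 336, 336, 3), dtype=np.uint8)
inputs = processor(
    text="USER: <video>\nWhat happens in the video? ASSISTANT:",
    videos=video,
    return_tensors="pt",
)
# Typically yields input_ids, attention_mask and pixel_values_videos.
print(inputs.keys())
```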
6
4
28
2
17
9
3
0.75
1
10
2
0
7
5
7
24
253
24
132
72
102
99
88
50
80
13
2
4
22
3,431
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/configuration_llava_onevision.py
transformers.models.llava_onevision.configuration_llava_onevision.LlavaOnevisionConfig
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig

class LlavaOnevisionConfig(PretrainedConfig):
""" This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate a Llava-Onevision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`): The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`): The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 151646): The image token index to encode the image prompt.
video_token_index (`int`, *optional*, defaults to 151647): The video token index to encode the video prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processing image features. The default value is "anyres_max_9".
image_grid_pinpoints (`List`, *optional*): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector.
Example: ```python >>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config
>>> # Initializing a Siglip vision config
>>> vision_config = SiglipVisionConfig()
>>> # Initializing a Qwen2 config
>>> text_config = Qwen2Config()
>>> # Initializing a Llava-Onevision llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
>>> configuration = LlavaOnevisionConfig(vision_config, text_config)
>>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
>>> model = LlavaOnevisionForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'llava_onevision'
attribute_map = {'image_token_id': 'image_token_index', 'video_token_id': 'video_token_index'}
sub_configs = {'text_config': AutoConfig, 'vision_config': AutoConfig}
def __init__(self, vision_config=None, text_config=None, image_token_index=151646, video_token_index=151647, projector_hidden_act='gelu', vision_feature_select_strategy='full', vision_feature_layer=-1, vision_aspect_ratio='anyres_max_9', image_grid_pinpoints=None, tie_word_embeddings=False, multimodal_projector_bias=True, **kwargs):
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ['default', 'full']:
raise ValueError(f"vision_feature_select_strategy should be one of 'default', 'full'. Got: {vision_feature_select_strategy}")
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.vision_aspect_ratio = vision_aspect_ratio
image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]]
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config['model_type'] = vision_config.get('model_type', 'siglip_vision_model')
vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING['siglip_vision_model'](hidden_size=1152, intermediate_size=4304, patch_size=14, image_size=384, num_hidden_layers=26, num_attention_heads=16, vision_use_head=False)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config['model_type'] = text_config.get('model_type', 'qwen2')
text_config = CONFIG_MAPPING[text_config['model_type']](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING['qwen2']()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
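The 36-entry `image_grid_pinpoints` default above is simply every (height, width) pair of multiples of 384 up to 6 * 384, in row-major order. A quick sketch that reproduces it:

```python
from itertools import product

# All (height, width) pairs with both sides in {384, 768, 1152, 1536, 1920, 2304},
# in the same row-major order as the default list above.
pinpoints = [[h, w] for h, w in product(range(384, 2305, 384), repeat=2)]
assert len(pinpoints) == 36
assert pinpoints[0] == [384, 384] and pinpoints[-1] == [2304, 2304]
```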
class LlavaOnevisionConfig(PretrainedConfig):
''' This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate a Llava-Onevision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`): The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`): The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 151646): The image token index to encode the image prompt.
video_token_index (`int`, *optional*, defaults to 151647): The video token index to encode the video prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processing image features. The default value is "anyres_max_9".
image_grid_pinpoints (`List`, *optional*): A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector.
Example: ```python >>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config
>>> # Initializing a Siglip vision config
>>> vision_config = SiglipVisionConfig()
>>> # Initializing a Qwen2 config
>>> text_config = Qwen2Config()
>>> # Initializing a Llava-Onevision llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
>>> configuration = LlavaOnevisionConfig(vision_config, text_config)
>>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
>>> model = LlavaOnevisionForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vision_config=None, text_config=None, image_token_index=151646, video_token_index=151647, projector_hidden_act='gelu', vision_feature_select_strategy='full', vision_feature_layer=-1, vision_aspect_ratio='anyres_max_9', image_grid_pinpoints=None, tie_word_embeddings=False, multimodal_projector_bias=True, **kwargs):
pass
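As the `__init__` above shows, dict sub-configs are materialized through `CONFIG_MAPPING` keyed on `model_type`, with siglip/qwen2 fallbacks when nothing is passed. A small sketch of that behavior; the `hidden_size` value is illustrative:

```python
from transformers import LlavaOnevisionConfig

# Passing a plain dict: model_type selects the concrete config class.
cfg = LlavaOnevisionConfig(text_config={"model_type": "qwen2", "hidden_size": 1024})
print(type(cfg.text_config).__name__)  # Qwen2Config
print(cfg.text_config.hidden_size)     # 1024

# Passing nothing: the documented siglip/qwen2 defaults are instantiated.
default_cfg = LlavaOnevisionConfig()
print(type(default_cfg.vision_config).__name__)  # SiglipVisionConfig
```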
2
1
100
7
93
0
9
0.51
1
3
0
0
1
10
1
1
163
18
96
28
80
49
26
14
24
9
1
1
9
3,432
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/image_processing_llava_onevision.py
transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor
from ...utils import TensorType, is_vision_available, logging
from typing import Optional, Union
from ...image_transforms import PaddingMode, convert_to_rgb, pad, resize, to_channel_dimension_format
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, get_size_dict, select_best_resolution
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from collections.abc import Iterable
# Module-level pieces used by the class body below but dropped by the class-only extraction;
# the divide_to_patches import path is an assumption (the original file defines it at module level).
from ..llava_next.image_processing_llava_next import divide_to_patches
if is_vision_available():
from PIL import Image
logger = logging.get_logger(__name__)

class LlavaOnevisionImageProcessor(BaseImageProcessor):
""" Constructs a LLaVa-Onevision image processor. Based on [`SiglipImageProcessor`] with incorporation of processing each video frame.
Args:
do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`): Size of the image after resizing. If given as `{"shortest_edge": int}`, the shortest edge of the image is resized to `size["shortest_edge"]`, with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method.
image_grid_pinpoints (`List` *optional*, defaults to all `(height, width)` pairs with both sides in `{384, 768, 1152, 1536, 1920, 2304}`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess` method. Not used for processing videos.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros.
do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB.
""" model_input_names = ['pixel_values', 'image_sizes', 'batch_num_images'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=True, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 384, 'width': 384} size = get_size_dict(size, default_to_square=False) image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]] self.do_resize = do_resize self.size = size self.image_grid_pinpoints = image_grid_pinpoints self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_pad = do_pad self.do_convert_rgb = do_convert_rgb def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """ Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`) dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected as input. Args: image (`np.ndarray`): The image to pad. padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image. """
if isinstance(padding, int) or len(padding) != 4:
return pad(image, padding, mode, constant_values, data_format, input_data_format)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if mode == PaddingMode.CONSTANT:
image = np.pad(image, padding, mode='constant', constant_values=constant_values)
elif mode == PaddingMode.REFLECT:
image = np.pad(image, padding, mode='reflect')
elif mode == PaddingMode.REPLICATE:
image = np.pad(image, padding, mode='edge')
elif mode == PaddingMode.SYMMETRIC:
image = np.pad(image, padding, mode='symmetric')
else:
raise ValueError(f'Invalid padding mode: {mode}')
image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
return image
def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:
""" Resizes an image to a target resolution while maintaining aspect ratio.
Args: image (np.array): The input image. target_resolution (tuple): The target resolution (height, width) of the image. resample (`PILImageResampling`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image.
Returns: np.array: The resized image. """
new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
return resized_image
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
original_height, original_width = original_resolution
target_height, target_width = target_resolution
paste_x, r_x = divmod(target_width - original_width, 2)
paste_y, r_y = divmod(target_height - original_height, 2)
return ((paste_y, paste_y + r_y), (paste_x, paste_x + r_x))
def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array:
""" Pad an image to a target resolution while maintaining aspect ratio. """
new_resolution = get_patch_output_size(image, target_resolution, input_data_format)
padding = self._get_padding_size(new_resolution, target_resolution)
padded_image = self.pad(image, padding=padding)
return padded_image
def get_image_patches(self, image: np.ndarray, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]:
""" Process an image with variable resolutions by dividing it into patches.
Args:
image (np.array): The input image to be processed.
grid_pinpoints (List): A list of possible resolutions.
size (`tuple`): Size to resize the original image to.
patch_size (`int`): Size of the patches to divide the image into.
resample (`PILImageResampling`): Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`): The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image.
Returns:
list[np.array]: A list of NumPy arrays containing the processed image patches. """
if not isinstance(grid_pinpoints, list):
raise TypeError('grid_pinpoints must be a list of possible resolutions.')
possible_resolutions = grid_pinpoints
image_size = get_image_size(image, channel_dim=input_data_format)
best_resolution = select_best_resolution(image_size, possible_resolutions)
resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format)
padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches]
resized_original_image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format)
image_patches = [resized_original_image] + patches
return image_patches
def _pad_for_batching(self, pixel_values: list[np.ndarray], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
""" Pads images on the `num_of_patches` dimension with zeros to form a batch of the same number of patches.
Args:
pixel_values (`list[np.ndarray]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`)
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images. """
max_patch = max((len(x) for x in pixel_values))
pixel_values = [self.pad(image, padding=((0, max_patch - image.shape[0]), (0, 0), (0, 0), (0, 0)), data_format=data_format, input_data_format=input_data_format) for image in pixel_values]
return pixel_values
def pad_to_square(self, image: np.ndarray, background_color: Union[int, tuple[int, int, int]]=0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
""" Pads an image to a square based on the longest edge.
Args:
image (`np.ndarray`): The image to pad.
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels.
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. """ height, width = get_image_size(image, input_data_format) num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1] if height == width: image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image max_dim = max(height, width) if isinstance(background_color, int): background_color = [background_color] elif len(background_color) != num_channels: raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels') if input_data_format == ChannelDimension.FIRST: result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype) for i, color in enumerate(background_color): result[i, :, :] = color if width > height: start = (max_dim - height) // 2 result[:, start:start + height, :] = image else: start = (max_dim - width) // 2 result[:, :, start:start + width] = image else: result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype) for i, color in enumerate(background_color): result[:, :, i] = color if width > height: start = (max_dim - height) // 2 result[start:start + height, :, :] = image else: start = (max_dim - width) // 2 result[:, start:start + width, :] = image image = to_channel_dimension_format(result, data_format, input_data_format) if data_format is not None else result return image def _preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Image.Image: """ Args: images (`ImageInput`): Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ if do_resize: images = [resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] return images def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None): """ Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. 
Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb if isinstance(images, (tuple, list)) and isinstance(images[0], (tuple, list)): images = [x for x in images if x] batch_num_images = [len(x) for x in images] elif isinstance(images, (tuple, list)): batch_num_images = [1] * len(images) else: batch_num_images = [1] need_patching = [n == 1 for n in batch_num_images for _ in range(n)] images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) size_tuple = (size['height'], size['width']) if 'height' in size and 'width' in size else (size['shortest_edge'], size['shortest_edge']) processed_images = [] image_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images] for i, image in enumerate(images): if need_patching[i]: image_patches = self.get_image_patches(image, image_grid_pinpoints, size=size_tuple, patch_size=size_tuple[0], resample=resample, data_format=input_data_format, input_data_format=input_data_format) else: padded_image = self.pad_to_square(image=image, background_color=tuple((int(x * 255) for x in self.image_mean)), input_data_format=input_data_format) image_patches = [padded_image] pixel_values = self._preprocess(image_patches, do_resize=do_resize, size=size_tuple, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) pixel_values = np.array(pixel_values) processed_images.append(pixel_values) if do_pad: processed_images = self._pad_for_batching(processed_images) return BatchFeature(data={'pixel_values': processed_images, 'image_sizes': image_sizes, 'batch_num_images': batch_num_images}, tensor_type=return_tensors)
class LlavaOnevisionImageProcessor(BaseImageProcessor):
''' Constructs a LLaVa-Onevision image processor. Based on [`SiglipImageProcessor`] with incorporation of processing each video frame.
Args:
do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`): Size of the image after resizing. If given as `{"shortest_edge": int}`, the shortest edge of the image is resized to `size["shortest_edge"]`, with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method.
image_grid_pinpoints (`List` *optional*, defaults to all `(height, width)` pairs with both sides in `{384, 768, 1152, 1536, 1920, 2304}`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess` method. Not used for processing videos.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros.
do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=True, do_convert_rgb: bool=True, **kwargs) -> None:
pass
def pad(self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode=PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]]=0.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
''' Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`) dimension or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected as input.
Args:
image (`np.ndarray`): The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image. '''
pass
def _resize_for_patching(self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:
''' Resizes an image to a target resolution while maintaining aspect ratio.
Args: image (np.array): The input image. target_resolution (tuple): The target resolution (height, width) of the image. resample (`PILImageResampling`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image.
Returns: np.array: The resized image.
'''
pass
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
pass
def _pad_for_patching(self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension) -> np.array:
''' Pad an image to a target resolution while maintaining aspect ratio. '''
pass
def get_image_patches(self, image: np.ndarray, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> list[np.array]:
''' Process an image with variable resolutions by dividing it into patches.
Args:
image (np.array): The input image to be processed.
grid_pinpoints (List): A list of possible resolutions.
size (`tuple`): Size to resize the original image to.
patch_size (`int`): Size of the patches to divide the image into.
resample (`PILImageResampling`): Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`): The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image.
Returns:
list[np.array]: A list of NumPy arrays containing the processed image patches. '''
pass
def _pad_for_batching(self, pixel_values: list[np.ndarray], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
''' Pads images on the `num_of_patches` dimension with zeros to form a batch of the same number of patches.
Args:
pixel_values (`list[np.ndarray]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`)
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images. '''
pass
def pad_to_square(self, image: np.ndarray, background_color: Union[int, tuple[int, int, int]]=0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
''' Pads an image to a square based on the longest edge.
Args:
image (`np.ndarray`): The image to pad.
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels.
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image.
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. ''' pass def _preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Image.Image: ''' Args: images (`ImageInput`): Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
''' pass def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, image_grid_pinpoints: Optional[list]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None): ''' Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. 
input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
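End to end, the anyres pipeline above turns one image into a stack of 384x384 tiles (base view plus grid patches). A usage sketch, assuming a transformers version matching this source; the patch count in the comment depends on which pinpoint is selected:

```python
import numpy as np
from transformers import LlavaOnevisionImageProcessor

proc = LlavaOnevisionImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

out = proc(images=image, return_tensors="np")
print(out["pixel_values"].shape)  # (1, num_patches, 3, 384, 384); num_patches varies with the best pinpoint
print(out["image_sizes"])         # original (height, width) per image, e.g. (480, 640)
print(out["batch_num_images"])    # [1]
```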
11
9
64
5
36
23
5
0.77
1
12
3
0
8
11
8
28
569
51
292
114
212
226
103
43
94
19
3
1
41
3,433
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py
transformers.models.llava_onevision.image_processing_llava_onevision_fast.LlavaOnevisionImageProcessorFast
from ...image_processing_utils_fast import BaseImageProcessorFast, DefaultFastImageProcessorKwargs, divide_to_patches, group_images_by_shape, reorder_images import torch from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size from ...processing_utils import Unpack from ...image_processing_utils import BatchFeature, get_patch_output_size, select_best_resolution from ...utils import TensorType, auto_docstring, is_torchvision_v2_available from typing import Optional, Union @auto_docstring class LlavaOnevisionImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {'height': 384, 'width': 384} default_to_square = False crop_size = None do_resize = True do_center_crop = None do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True image_grid_pinpoints = [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]] valid_kwargs = LlavaOnevisionFastImageProcessorKwargs model_input_names = ['pixel_values', 'image_sizes', 'batch_num_images'] def __init__(self, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]) -> BatchFeature: if isinstance(images, (tuple, list)) and isinstance(images[0], (tuple, list)): batch_num_images = [len(x) for x in images] elif isinstance(images, (tuple, list)): batch_num_images = [1] * len(images) else: batch_num_images = [1] kwargs['batch_num_images'] = batch_num_images return super().preprocess(images, **kwargs) def _resize_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, interpolation: 'F.InterpolationMode', input_data_format: ChannelDimension) -> 'torch.Tensor': """ Resizes an image to a target resolution while maintaining aspect ratio. Args: image ("torch.Tensor"): The input image. target_resolution (tuple): The target resolution (height, width) of the image. interpolation (`InterpolationMode`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: "torch.Tensor": The resized and padded image. """ new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format) resized_image = self.resize(image=image, size=SizeDict(height=new_height, width=new_width), interpolation=interpolation) return resized_image def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): original_height, original_width = original_resolution target_height, target_width = target_resolution paste_x, r_x = divmod(target_width - original_width, 2) paste_y, r_y = divmod(target_height - original_height, 2) return [paste_x, paste_y, paste_x + r_x, paste_y + r_y] def _pad_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, input_data_format: ChannelDimension) -> 'torch.Tensor': """ Pad an image to a target resolution while maintaining aspect ratio. 
""" new_resolution = get_patch_output_size(image, target_resolution, input_data_format) padding = self._get_padding_size(new_resolution, target_resolution) padded_image = F.pad(image, padding=padding) return padded_image def _get_image_patches(self, image: 'torch.Tensor', grid_pinpoints, size: tuple, patch_size: int, interpolation: 'F.InterpolationMode') -> list['torch.Tensor']: """ Process an image with variable resolutions by dividing it into patches. Args: image ("torch.Tensor"): The input image to be processed. grid_pinpoints (List): A string representation of a list of possible resolutions. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. interpolation (`"InterpolationMode"`): Resampling filter to use if resizing the image. Returns: list["torch.Tensor"]: A list of NumPy arrays containing the processed image patches. """ if not isinstance(grid_pinpoints, list): raise TypeError('grid_pinpoints must be a list of possible resolutions.') possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching(image, best_resolution, interpolation=interpolation, input_data_format=ChannelDimension.FIRST) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=ChannelDimension.FIRST) patches = divide_to_patches(padded_image, patch_size=patch_size) resized_original_image = F.resize(image, size=size, interpolation=interpolation) image_patches = [resized_original_image] + patches return image_patches def _pad_for_batching(self, pixel_values: list['torch.Tensor']) -> list['torch.Tensor']: """ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`) Returns: list[`torch.Tensor`]: The padded images. 
""" max_patch = max((len(x) for x in pixel_values)) pixel_values = [torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]]) for image in pixel_values] return pixel_values def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: processed_images = [] image_sizes = [] need_patching = [n == 1 for n in batch_num_images for _ in range(n)] if size and size.height and size.width: size_tuple = (size.height, size.width) else: size_tuple = (size.shortest_edge, size.shortest_edge) if crop_size and crop_size.height: patch_size = crop_size.height elif size and size.height: patch_size = size.height else: patch_size = size.shortest_edge for i, image in enumerate(images): if need_patching[i]: image_patches = self._get_image_patches(image, image_grid_pinpoints, size=size_tuple, patch_size=patch_size, interpolation=interpolation) else: padded_image = self.pad_to_square(images=image, background_color=tuple((int(x * 255) for x in self.image_mean))) image_patches = [padded_image] processed_image_patches_grouped = {} grouped_image_patches, grouped_image_patches_index = group_images_by_shape(image_patches, disable_grouping=disable_grouping) for shape, stacked_image_patches in grouped_image_patches.items(): if do_resize: stacked_image_patches = self.resize(image=stacked_image_patches, size=size, interpolation=interpolation) if do_center_crop: stacked_image_patches = self.center_crop(stacked_image_patches, crop_size) stacked_image_patches = self.rescale_and_normalize(stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std) processed_image_patches_grouped[shape] = stacked_image_patches processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index) processed_image_patches = torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches processed_images.append(processed_image_patches) image_sizes.append(get_image_size(image, ChannelDimension.FIRST)) if do_pad: processed_images = self._pad_for_batching(processed_images) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={'pixel_values': processed_images, 'image_sizes': image_sizes, 'batch_num_images': batch_num_images}, tensor_type=return_tensors) def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, tuple[int, int, int]]=0) -> 'torch.Tensor': """ Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. 
""" height, width = get_image_size(images, ChannelDimension.FIRST) if height == width: return images num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels') max_dim = max(height, width) paste_x_left = (max_dim - width) // 2 paste_y_left = (max_dim - height) // 2 paste_x_right = max_dim - width - paste_x_left paste_y_right = max_dim - height - paste_y_left padded_images = F.pad(images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color) return padded_images
@auto_docstring class LlavaOnevisionImageProcessorFast(BaseImageProcessorFast): def __init__(self, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]): pass @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]) -> BatchFeature: pass def _resize_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, interpolation: 'F.InterpolationMode', input_data_format: ChannelDimension) -> 'torch.Tensor': ''' Resizes an image to a target resolution while maintaining aspect ratio. Args: image ("torch.Tensor"): The input image. target_resolution (tuple): The target resolution (height, width) of the image. interpolation (`InterpolationMode`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: "torch.Tensor": The resized and padded image. ''' pass def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): pass def _pad_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, input_data_format: ChannelDimension) -> 'torch.Tensor': ''' Pad an image to a target resolution while maintaining aspect ratio. ''' pass def _get_image_patches(self, image: 'torch.Tensor', grid_pinpoints, size: tuple, patch_size: int, interpolation: 'F.InterpolationMode') -> list['torch.Tensor']: ''' Process an image with variable resolutions by dividing it into patches. Args: image ("torch.Tensor"): The input image to be processed. grid_pinpoints (List): A list of possible resolutions. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. interpolation (`"InterpolationMode"`): Resampling filter to use if resizing the image. Returns: list["torch.Tensor"]: A list of tensors containing the processed image patches. ''' pass def _pad_for_batching(self, pixel_values: list['torch.Tensor']) -> list['torch.Tensor']: ''' Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`) Returns: list[`torch.Tensor`]: The padded images. ''' pass def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: pass def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, tuple[int, int, int]]=0) -> 'torch.Tensor': ''' Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. ''' pass
12
5
25
3
16
7
2
0.36
1
13
5
0
8
0
8
42
237
30
153
101
94
55
79
51
70
11
4
3
19
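The `_get_padding_size` helper in the `LlavaOnevisionImageProcessorFast` record above splits the total padding with `divmod` so that any odd remainder lands on the right/bottom edge. A minimal sketch of that computation, using plain `torch.nn.functional.pad` (which takes `(left, right, top, bottom)` for the last two dimensions) instead of the torchvision functional `F` the class uses:

```python
import torch
import torch.nn.functional as F

def get_padding_size(original_hw, target_hw):
    # divmod splits the slack evenly; any odd pixel goes to the
    # right/bottom side, matching _get_padding_size above.
    oh, ow = original_hw
    th, tw = target_hw
    left, r_x = divmod(tw - ow, 2)
    top, r_y = divmod(th - oh, 2)
    return left, top, left + r_x, top + r_y

image = torch.zeros(3, 5, 7)  # (channels, height, width)
left, top, right, bottom = get_padding_size((5, 7), (8, 8))
# torch.nn.functional.pad pads the last dim first: (left, right, top, bottom)
padded = F.pad(image, (left, right, top, bottom))
print(padded.shape)  # torch.Size([3, 8, 8])
```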
3,434
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/modeling_llava_onevision.py
transformers.models.llava_onevision.modeling_llava_onevision.LlavaOnevisionCausalLMOutputWithPast
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from typing import Optional, Union import torch from ...cache_utils import Cache from dataclasses import dataclass from ...utils import TransformersKwargs, auto_docstring, can_return_tuple @dataclass @auto_docstring(custom_intro='\n Base class for LlavaOnevision causal language model (or autoregressive) outputs.\n ') class LlavaOnevisionCausalLMOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. video_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`. video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None video_hidden_states: Optional[torch.FloatTensor] = None
@dataclass @auto_docstring(custom_intro='\n Base class for LlavaOnevision causal language model (or autoregressive) outputs.\n ') class LlavaOnevisionCausalLMOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. video_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`. video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. ''' pass
3
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
42
6
8
8
7
28
8
8
7
0
1
0
0
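`LlavaOnevisionCausalLMOutputWithPast` above is a `ModelOutput` dataclass, so it supports attribute, key, and integer access, and fields left as `None` are skipped when it is converted to a tuple. A minimal sketch with a hypothetical two-field subclass (not a class from the record):

```python
import torch
from dataclasses import dataclass
from typing import Optional
from transformers.utils import ModelOutput

@dataclass
class ToyCausalLMOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None

out = ToyCausalLMOutput(logits=torch.randn(1, 4, 32))
# Attribute, key, and integer access all work; None fields (loss here)
# are dropped from the tuple view, so index 0 is `logits`.
print(out.logits.shape)
print(out["logits"].shape)
print(out[0].shape)
```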
3,435
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/modeling_llava_onevision.py
transformers.models.llava_onevision.modeling_llava_onevision.LlavaOnevisionForConditionalGeneration
import torch from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...processing_utils import Unpack from ...cache_utils import Cache from ...generation import GenerationMixin from .configuration_llava_onevision import LlavaOnevisionConfig from typing import Optional, Union from torch import nn @auto_docstring(custom_intro='\n The LLAVA-NeXT model which consists of a vision backbone and a language model.\n ') class LlavaOnevisionForConditionalGeneration(LlavaOnevisionPreTrainedModel, GenerationMixin): _checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_tower': 'model.vision_tower', '^multi_modal_projector': 'model.multi_modal_projector', '^image_newline': 'model.image_newline', '^language_model.lm_head': 'lm_head'} _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: LlavaOnevisionConfig): super().__init__(config) self.model = LlavaOnevisionModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def get_output_embeddings(self) -> nn.Module: return self.lm_head def set_decoder(self, decoder): self.model.set_decoder(decoder) def get_decoder(self): return self.model.get_decoder() def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None): return self.model.pack_image_features(image_features=image_features, image_sizes=image_sizes, vision_feature_select_strategy=vision_feature_select_strategy, image_newline=image_newline) def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): return self.model.get_image_features(pixel_values=pixel_values, image_sizes=image_sizes, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy) @property def language_model(self): return self.model.language_model @property def vision_tower(self): return self.model.vision_tower @property def multi_modal_projector(self): return self.model.multi_modal_projector @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.LongTensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_sizes_videos: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, vision_aspect_ratio: Optional[str]=None, batch_num_images: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaOnevisionCausalLMOutputWithPast]: """ image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*): The sizes of the videos in the batch, being (height, width) for each frame in the video. 
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processing image features. The default value is "anyres_max_9". batch_num_images (`torch.LongTensor`, *optional*): Number of images in each sample. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import torch >>> from transformers import LlavaOnevisionProcessor, LlavaOnevisionForConditionalGeneration >>> model = LlavaOnevisionForConditionalGeneration.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf", dtype="float16", device_map="cuda:0") >>> processor = LlavaOnevisionProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf") >>> conversation = [ ... { ... "role": "user", ... "content": [ ... {"type": "text", "text": "What is shown in this image?"}, ... {"type": "image"}, ... ], ... }, ... ] >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) >>> image_file = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> raw_image = Image.open(requests.get(image_file, stream=True).raw) >>> inputs = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16) >>> output = model.generate(**inputs, max_new_tokens=20, do_sample=False) >>> processor.batch_decode(output, skip_special_tokens=True)[0] "user\\n\\nWhat is shown in this image?\\nassistant\\ncat" ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy vision_aspect_ratio = vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_sizes=image_sizes, image_sizes_videos=image_sizes_videos, vision_aspect_ratio=vision_aspect_ratio, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, batch_num_images=batch_num_images, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs) hidden_states = outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs) return LlavaOnevisionCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, video_hidden_states=outputs.video_hidden_states) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, image_sizes=None, pixel_values_videos=None, image_sizes_videos=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs) if cache_position[0] == 0: model_inputs['pixel_values'] = pixel_values model_inputs['image_sizes'] = image_sizes model_inputs['pixel_values_videos'] = pixel_values_videos model_inputs['image_sizes_videos'] = image_sizes_videos return model_inputs @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): return self.model.get_video_features(pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy)
@auto_docstring(custom_intro='\n The LLAVA-NeXT model which consists of a vision backbone and a language model.\n ') class LlavaOnevisionForConditionalGeneration(LlavaOnevisionPreTrainedModel, GenerationMixin): def __init__(self, config: LlavaOnevisionConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_output_embeddings(self) -> nn.Module: pass def set_decoder(self, decoder): pass def get_decoder(self): pass def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None): pass def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): pass @property def language_model(self): pass @property def vision_tower(self): pass @property def multi_modal_projector(self): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.LongTensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_sizes_videos: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, vision_aspect_ratio: Optional[str]=None, batch_num_images: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, LlavaOnevisionCausalLMOutputWithPast]: ''' image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*): The sizes of the videos in the batch, being (height, width) for each frame in the video. vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processing image features. The default value is "anyres_max_9". batch_num_images (`torch.LongTensor`, *optional*): Number of images in each sample. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import torch >>> from transformers import LlavaOnevisionProcessor, LlavaOnevisionForConditionalGeneration >>> model = LlavaOnevisionForConditionalGeneration.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf", dtype="float16", device_map="cuda:0") >>> processor = LlavaOnevisionProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf") >>> conversation = [ ... { ... "role": "user", ... "content": [ ... {"type": "text", "text": "What is shown in this image?"}, ... {"type": "image"}, ... ], ... }, ...
] >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) >>> image_file = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> raw_image = Image.open(requests.get(image_file, stream=True).raw) >>> inputs = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16) >>> output = model.generate(**inputs, max_new_tokens=20, do_sample=False) >>> processor.batch_decode(output, skip_special_tokens=True)[0] "user\n\nWhat is shown in this image?\nassistant\ncat" ```''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, image_sizes=None, pixel_values_videos=None, image_sizes_videos=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None): pass
23
2
34
3
23
8
4
0.36
2
13
5
0
13
6
13
14
469
50
308
108
246
111
159
61
145
20
2
3
48
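The static `_prepare_4d_causal_attention_mask_with_cache_position` in the record above builds an additive mask where disallowed positions hold the dtype minimum. A standalone sketch of just the causal part (the padding-mask merge is omitted), assuming float32, a 5-slot static cache, and queries sitting at cache positions 2..4:

```python
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length, target_length = 3, 5                   # 3 queries, 5-slot static cache
cache_position = torch.arange(2, 2 + sequence_length)   # queries occupy slots 2..4

causal_mask = torch.full((sequence_length, target_length), min_dtype, dtype=dtype)
if sequence_length != 1:
    causal_mask = torch.triu(causal_mask, diagonal=1)   # zero out the lower triangle
# Also allow every key slot at or before each query's absolute cache position.
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :]             # (batch, heads, q_len, kv_len)

print((causal_mask == 0).int().squeeze())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]])   # 1 where attention is allowed
```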
3,436
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/modeling_llava_onevision.py
transformers.models.llava_onevision.modeling_llava_onevision.LlavaOnevisionMultiModalProjector
from ...activations import ACT2FN from .configuration_llava_onevision import LlavaOnevisionConfig from torch import nn class LlavaOnevisionMultiModalProjector(nn.Module): def __init__(self, config: LlavaOnevisionConfig): super().__init__() num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer) self.linear_1 = nn.Linear(config.vision_config.hidden_size * num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states
class LlavaOnevisionMultiModalProjector(nn.Module): def __init__(self, config: LlavaOnevisionConfig): pass def forward(self, image_features): pass
3
0
9
0
9
1
2
0.06
1
3
1
0
2
3
2
12
20
1
18
8
15
1
12
8
9
2
1
0
3
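The projector in the record above is just linear → activation → linear, mapping the vision hidden size to the text hidden size. A functionally equivalent sketch; the sizes 1152/896 are assumptions for illustration, and `nn.GELU` stands in for whatever `ACT2FN[config.projector_hidden_act]` resolves to:

```python
import torch
from torch import nn

vision_hidden, text_hidden = 1152, 896   # hypothetical config values
projector = nn.Sequential(
    nn.Linear(vision_hidden, text_hidden),
    nn.GELU(),                           # stand-in for the configured activation
    nn.Linear(text_hidden, text_hidden),
)

image_features = torch.randn(2, 729, vision_hidden)  # (batch, patches, dim)
projected = projector(image_features)
print(projected.shape)  # torch.Size([2, 729, 896])
```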
3,437
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/modeling_llava_onevision.py
transformers.models.llava_onevision.modeling_llava_onevision.LlavaOnevisionPreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...modeling_utils import PreTrainedModel from .configuration_llava_onevision import LlavaOnevisionConfig from torch import nn import math @auto_docstring class LlavaOnevisionPreTrainedModel(PreTrainedModel): config: LlavaOnevisionConfig base_model_prefix = '' supports_gradient_checkpointing = True _no_split_modules = ['LlamaDecoderLayer'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module): std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, LlavaOnevisionModel): embed_std = 1 / math.sqrt(self.config.text_config.hidden_size) module.image_newline.data.normal_(mean=0.0, std=embed_std)
@auto_docstring class LlavaOnevisionPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
21
2
16
3
7
0.19
1
0
0
1
1
0
1
1
34
3
27
13
25
5
22
13
20
7
1
2
7
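A small sketch of the two initialization rules in `_init_weights` above: linear layers draw weights from N(0, std) with zeroed biases, while `image_newline` uses std = 1/sqrt(text hidden size). The `hidden_size` and `std` values here are assumptions, not values taken from the record:

```python
import math
import torch
from torch import nn

hidden_size, std = 896, 0.02             # hypothetical config values
linear = nn.Linear(hidden_size, hidden_size)
linear.weight.data.normal_(mean=0.0, std=std)   # N(0, initializer_range)
if linear.bias is not None:
    linear.bias.data.zero_()

image_newline = torch.empty(hidden_size)
image_newline.normal_(mean=0.0, std=1 / math.sqrt(hidden_size))
print(linear.weight.std().item(), image_newline.std().item())
```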
3,438
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/modular_llava_onevision.py
transformers.models.llava_onevision.modular_llava_onevision.LlavaOnevisionImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size from ...utils import TensorType, auto_docstring, can_return_tuple, is_torchvision_v2_available, logging from ...image_processing_utils import BatchFeature from typing import Optional, Union import torch from transformers.models.llava_next.image_processing_llava_next_fast import LlavaNextImageProcessorFast from ...image_processing_utils_fast import DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images from ...processing_utils import Unpack class LlavaOnevisionImageProcessorFast(LlavaNextImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {'height': 384, 'width': 384} crop_size = None default_to_square = False do_resize = True do_center_crop = None do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True image_grid_pinpoints = [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]] model_input_names = ['pixel_values', 'image_sizes', 'batch_num_images'] def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, tuple[int, int, int]]=0) -> 'torch.Tensor': """ Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. 
""" height, width = get_image_size(images, ChannelDimension.FIRST) if height == width: return images num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels') max_dim = max(height, width) paste_x_left = (max_dim - width) // 2 paste_y_left = (max_dim - height) // 2 paste_x_right = max_dim - width - paste_x_left paste_y_right = max_dim - height - paste_y_left padded_images = F.pad(images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color) return padded_images @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]) -> BatchFeature: if isinstance(images, (tuple, list)) and isinstance(images[0], (tuple, list)): batch_num_images = [len(x) for x in images] elif isinstance(images, (tuple, list)): batch_num_images = [1] * len(images) else: batch_num_images = [1] kwargs['batch_num_images'] = batch_num_images return super().preprocess(images, **kwargs) def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: processed_images = [] image_sizes = [] need_patching = [n == 1 for n in batch_num_images for _ in range(n)] if size and size.height and size.width: size_tuple = (size.height, size.width) else: size_tuple = (size.shortest_edge, size.shortest_edge) if crop_size and crop_size.height: patch_size = crop_size.height elif size and size.height: patch_size = size.height else: patch_size = size.shortest_edge for i, image in enumerate(images): if need_patching[i]: image_patches = self._get_image_patches(image, image_grid_pinpoints, size=size_tuple, patch_size=patch_size, interpolation=interpolation) else: padded_image = self.pad_to_square(images=image, background_color=tuple((int(x * 255) for x in self.image_mean))) image_patches = [padded_image] processed_image_patches_grouped = {} grouped_image_patches, grouped_image_patches_index = group_images_by_shape(image_patches, disable_grouping=disable_grouping) for shape, stacked_image_patches in grouped_image_patches.items(): if do_resize: stacked_image_patches = self.resize(image=stacked_image_patches, size=size, interpolation=interpolation) if do_center_crop: stacked_image_patches = self.center_crop(stacked_image_patches, crop_size) stacked_image_patches = self.rescale_and_normalize(stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std) processed_image_patches_grouped[shape] = stacked_image_patches processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index) processed_image_patches = torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches processed_images.append(processed_image_patches) image_sizes.append(get_image_size(image, ChannelDimension.FIRST)) if do_pad: processed_images = 
self._pad_for_batching(processed_images) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={'pixel_values': processed_images, 'image_sizes': image_sizes, 'batch_num_images': batch_num_images}, tensor_type=return_tensors)
class LlavaOnevisionImageProcessorFast(LlavaNextImageProcessorFast): def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, tuple[int, int, int]]=0) -> 'torch.Tensor': ''' Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. ''' pass @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]) -> BatchFeature: pass def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature: pass
5
1
0
0
0
0
0
0.07
1
0
0
0
0
0
0
42
15
0
15
15
14
1
15
15
14
0
5
0
0
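`_get_image_patches` in the records above delegates the anyres grid choice to `select_best_resolution`. A hedged reimplementation of that criterion, written from the behavior of `transformers`' image-processing utilities (maximize retained resolution after aspect-preserving downscaling, then minimize wasted padded area):

```python
import math

def best_resolution(image_size, possible_resolutions):
    orig_h, orig_w = image_size
    best, max_effective, min_wasted = None, 0, math.inf
    for h, w in possible_resolutions:
        # Downscale to fit the candidate while keeping the aspect ratio.
        scale = min(w / orig_w, h / orig_h)
        down_w, down_h = int(orig_w * scale), int(orig_h * scale)
        # Effective resolution is capped at the original pixel count.
        effective = min(down_w * down_h, orig_w * orig_h)
        wasted = w * h - effective
        if effective > max_effective or (effective == max_effective and wasted < min_wasted):
            best, max_effective, min_wasted = (h, w), effective, wasted
    return best

pinpoints = [[384, 384], [384, 768], [768, 384], [768, 768]]
print(best_resolution((300, 900), pinpoints))  # (384, 768): wide image, least padding
```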
3,439
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/processing_llava_onevision.py
transformers.models.llava_onevision.processing_llava_onevision.LlavaOnevisionProcessor
from ...feature_extraction_utils import BatchFeature from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from collections.abc import Iterable import numpy as np import math from typing import Optional, Union from ...video_utils import VideoInput from ...image_processing_utils import select_best_resolution from ...image_utils import ImageInput, get_image_size, to_numpy_array from ...tokenization_utils_base import PreTokenizedInput, TextInput class LlavaOnevisionProcessor(ProcessorMixin): """ Constructs a LLaVa-Onevision processor which wraps a LLaVa-Onevision video processor, LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor. [`LlavaOnevisionProcessor`] offers all the functionalities of [`LlavaOnevisionVideoProcessor`], [`LlavaOnevisionImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaOnevisionVideoProcessor.__call__`], [`~LlavaOnevisionProcessor.__call__`] and [`~LlavaOnevisionProcessor.decode`] for more information. Args: image_processor ([`LlavaOnevisionImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. video_processor ([`LlavaOnevisionVideoProcessor`], *optional*): The video processor is a required input. num_image_tokens (`int`, *optional*): Number of image tokens for one image that will be returned by the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be the same as in the model's config. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. video_token (`str`, *optional*, defaults to `"<video>"`): Special token used to denote video location. vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processing image features. The default value is "anyres_max_9".
""" attributes = ['image_processor', 'tokenizer', 'video_processor'] image_processor_class = 'AutoImageProcessor' tokenizer_class = 'AutoTokenizer' video_processor_class = 'AutoVideoProcessor' def __init__(self, image_processor=None, tokenizer=None, video_processor=None, num_image_tokens=None, vision_feature_select_strategy=None, chat_template=None, image_token='<image>', video_token='<video>', vision_aspect_ratio='anyres_max_9', **kwargs): self.num_image_tokens = num_image_tokens self.vision_feature_select_strategy = vision_feature_select_strategy self.image_token = tokenizer.image_token if hasattr(tokenizer, 'image_token') else image_token self.video_token = tokenizer.video_token if hasattr(tokenizer, 'video_token') else video_token self.image_token_id = tokenizer.image_token_id if getattr(tokenizer, 'image_token_id', None) else tokenizer.convert_tokens_to_ids(self.image_token) self.video_token_id = tokenizer.video_token_id if getattr(tokenizer, 'video_token_id', None) else tokenizer.convert_tokens_to_ids(self.video_token) self.vision_aspect_ratio = vision_aspect_ratio super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos: Optional[VideoInput]=None, **kwargs: Unpack[LlavaOnevisionProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_values_videos** -- Pixel values of a video input to be fed to a model. Returned when `videos` is not `None`. - **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`. 
""" output_kwargs = self._merge_kwargs(LlavaOnevisionProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise TypeError('Invalid input text. Please provide a string, or a list of strings') image_inputs = video_inputs = {} if images is not None: image_inputs = self.image_processor(images, **output_kwargs['images_kwargs']) batch_num_images = iter(image_inputs['batch_num_images']) image_sizes = iter(image_inputs['image_sizes']) height, width = get_image_size(to_numpy_array(image_inputs['pixel_values'][0][0]), channel_dim=output_kwargs['images_kwargs'].get('data_format')) text, num_image_tokens = self._expand_image_tokens(text, image_sizes, height, width, self.image_token, batch_num_images) if videos is not None: video_inputs = self.video_processor(videos, **output_kwargs['videos_kwargs']) one_video = video_inputs.get('pixel_values_videos')[0] if isinstance(video_inputs.get('pixel_values_videos')[0], (list, tuple)): one_video = np.array(one_video) else: one_video = to_numpy_array(one_video) height, width = get_image_size(one_video[0], channel_dim=output_kwargs['images_kwargs'].get('data_format')) num_frames = one_video.shape[0] patches_height_width = int(math.sqrt(self.num_image_tokens)) pooled_height_width = math.ceil(patches_height_width / 2) num_video_tokens = num_frames * pooled_height_width * pooled_height_width + 1 text = [sample.replace(self.video_token, self.video_token * num_video_tokens) for sample in text] return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None) return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', None) text_inputs = self.tokenizer(text, **output_kwargs['text_kwargs']) self._check_special_mm_tokens(text, text_inputs, modalities=['image']) if return_mm_token_type_ids: array_ids = np.array(text_inputs['input_ids']) mm_token_type_ids = np.zeros_like(text_inputs['input_ids']) mm_token_type_ids[array_ids == self.image_token_id] = 1 text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_inputs, **video_inputs}, tensor_type=return_tensors) def _expand_image_tokens(self, text: list[TextInput], image_sizes: Iterable[Union[list[int], int]], height: int, width: int, special_token: str, batch_num_images: Iterable[int]): prompt_strings = [] max_num_vision_tokens = 0 for sample in text: if special_token in sample: num_images = next(batch_num_images) is_multi_image = num_images != 1 else: is_multi_image = False while special_token in sample: original_size = next(image_sizes) if is_multi_image: num_image_tokens = self.num_image_tokens + 1 else: if not isinstance(original_size, (list, tuple)): original_size = original_size.tolist() orig_height, orig_width = original_size num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width) max_num_vision_tokens = max(max_num_vision_tokens, num_image_tokens) if self.vision_feature_select_strategy == 'default': num_image_tokens -= 1 sample = sample.replace(special_token, '<placeholder>' * num_image_tokens, 1) prompt_strings.append(sample) text = [sample.replace('<placeholder>', special_token) for sample in prompt_strings] return (text, max_num_vision_tokens) def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int: image_grid_pinpoints = self.image_processor.image_grid_pinpoints height_best_resolution, width_best_resolution = 
select_best_resolution([orig_height, orig_width], image_grid_pinpoints) scale_height, scale_width = (height_best_resolution // height, width_best_resolution // width) patches_height = patches_width = int(math.sqrt(self.num_image_tokens)) unpadded_features, newline_features = self._get_unpadded_features(orig_height, orig_width, patches_height, patches_width, scale_height, scale_width) base_features = self.num_image_tokens num_image_tokens = unpadded_features + newline_features + base_features return num_image_tokens def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width): """ Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA because it divides each image into patches depending on its resolution. Therefore we need to calculate how many patches an image is divided into and get the number of features from that. """ current_height = patches_height * scale_height current_width = patches_width * scale_width original_aspect_ratio = width / height current_aspect_ratio = current_width / current_height if original_aspect_ratio > current_aspect_ratio: new_height = int(round(height * (current_width / width), 7)) padding = (current_height - new_height) // 2 current_height -= padding * 2 else: new_width = int(round(width * (current_height / height), 7)) padding = (current_width - new_width) // 2 current_width -= padding * 2 unpadded_features = current_height * current_width newline_features = current_height max_num_patches = int(self.vision_aspect_ratio.strip('anyres_max_')) ratio = math.sqrt(current_height * current_width / (max_num_patches * patches_height ** 2)) if ratio > 1.1: unpadded_features = int(current_height // ratio) * int(current_width // ratio) newline_features = int(current_height // ratio) return (unpadded_features, newline_features) def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. video_sizes (list[list[str]], *optional*): The input sizes formatted as (num_frames, height, width) per each video. audio_lengths (list[int], *optional*): The input length formatted as per each audio. Returns: dict[str, list[int]]: A dictionary mapping each modality ("image", "video", "audio") to a list containing the number of placeholder tokens required. If the model doesn't accept a certain modality or no input sizes are provided, the dict value is set to an empty list.
""" vision_data = {} if image_sizes is not None: images_kwargs = LlavaOnevisionProcessorKwargs._defaults.get('images_kwargs', {}) images_kwargs.update(kwargs) size = images_kwargs.get('size', None) or self.image_processor.size size = (size['shortest_edge'], size['shortest_edge']) if 'shortest_edge' in size else (min(size['height'], size['width']), min(size['height'], size['width'])) processed_height, processed_width = size batch_num_image_tokens = [] num_image_patches = [1] * len(image_sizes) for image_size in image_sizes: orig_height, orig_width = image_size num_image_tokens = self._get_number_of_features(orig_height, orig_width, processed_height, processed_width) if self.vision_feature_select_strategy == 'default': num_image_tokens -= 1 batch_num_image_tokens.append(num_image_tokens) vision_data.update({'num_image_tokens': batch_num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data)
class LlavaOnevisionProcessor(ProcessorMixin): ''' Constructs a LLaVa-Onevision processor which wraps a LLaVa-Onevision video processor, LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor. [`LlavaOnevisionProcessor`] offers all the functionalities of [`LlavaOnevisionVideoProcessor`], [`LlavaOnevisionImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaOnevisionVideoProcessor.__call__`], [`~LlavaOnevisionProcessor.__call__`] and [`~LlavaOnevisionProcessor.decode`] for more information. Args: image_processor ([`LlavaOnevisionImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. video_processor ([`LlavaOnevisionVideoProcessor`], *optional*): The video processor is a required input. num_image_tokens (`int`, *optional*): Number of image tokens for one image that will be returned by the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be the same as in the model's config. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. video_token (`str`, *optional*, defaults to `"<video>"`): Special token used to denote video location. vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processing image features. The default value is "anyres_max_9". ''' def __init__(self, image_processor=None, tokenizer=None, video_processor=None, num_image_tokens=None, vision_feature_select_strategy=None, chat_template=None, image_token='<image>', video_token='<video>', vision_aspect_ratio='anyres_max_9', **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos: Optional[VideoInput]=None, **kwargs: Unpack[LlavaOnevisionProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The video or batch of videos to be prepared. Each video can be a 4D NumPy array or a PyTorch tensor. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model.
Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_values_videos** -- Pixel values of a video input to be fed to a model. Returned when `videos` is not `None`. - **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`. ''' pass def _expand_image_tokens(self, text: list[TextInput], image_sizes: Iterable[Union[list[int], int]], height: int, width: int, special_token: str, batch_num_images: Iterable[int]): pass def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int: pass def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width): ''' Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA because it divides each image into patches depending on its resolution. Therefore we need to calculate how many patches an image is divided into and get the number of features from that. ''' pass def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs): ''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. video_sizes (list[list[str]], *optional*): The input sizes formatted as (num_frames, height, width) per each video. audio_lengths (list[int], *optional*): The input length formatted as per each audio. Returns: dict[str, list[int]]: A dictionary mapping each modality ("image", "video", "audio") to a list containing the number of placeholder tokens required. If the model doesn't accept a certain modality or no input sizes are provided, the dict value is set to an empty list. ''' pass
7
4
22
2
15
5
3
0.45
1
10
3
0
9
4
10
27
276
37
166
88
127
75
112
60
101
6
2
3
29
3,440
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/processing_llava_onevision.py
transformers.models.llava_onevision.processing_llava_onevision.LlavaOnevisionProcessorKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack class LlavaOnevisionProcessorKwargs(ProcessingKwargs, total=False): _defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}, 'image_kwargs': {}, 'videos_kwargs': {}}
class LlavaOnevisionProcessorKwargs(ProcessingKwargs, total=False): pass
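A rough sketch of how a `ProcessingKwargs` subclass like this one is consumed. The merge logic below is a simplified stand-in, not the library's implementation (the real merging happens inside `ProcessorMixin` and also folds in tokenizer init kwargs): `_defaults` seeds each modality's kwargs and call-site arguments override it.

```python
# Simplified stand-in for the defaults/override merge.
defaults = {"text_kwargs": {"padding": False, "return_mm_token_type_ids": False}}

def merged_text_kwargs(**call_site):
    out = dict(defaults["text_kwargs"])  # start from the _defaults above
    out.update(call_site)                # call-site kwargs win
    return out

print(merged_text_kwargs(padding="max_length", max_length=32))
# {'padding': 'max_length', 'return_mm_token_type_ids': False, 'max_length': 32}
```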
1
0
0
0
0
0
0
0.13
2
0
0
0
0
0
0
0
9
0
8
2
7
1
2
2
1
0
3
0
0
3,441
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/llava_onevision/video_processing_llava_onevision.py
transformers.models.llava_onevision.video_processing_llava_onevision.LlavaOnevisionVideoProcessor
from ...video_processing_utils import BaseVideoProcessor from ...processing_utils import Unpack, VideosKwargs from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling class LlavaOnevisionVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {'height': 384, 'width': 384} rescale_factor = 1 / 255 default_to_square = False crop_size = None do_resize = True do_center_crop = None do_rescale = True do_normalize = True do_convert_rgb = True do_sample_frames = False valid_kwargs = LlavaOnevisionFastVideoProcessorInitKwargs model_input_names = ['pixel_values_videos'] def __init__(self, **kwargs: Unpack[LlavaOnevisionFastVideoProcessorInitKwargs]): super().__init__(**kwargs)
class LlavaOnevisionVideoProcessor(BaseVideoProcessor): def __init__(self, **kwargs: Unpack[LlavaOnevisionFastVideoProcessorInitKwargs]): pass
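Note that `LlavaOnevisionFastVideoProcessorInitKwargs` is defined alongside the class in the same module, which is why it does not appear in the extracted import list above. A hedged instantiation sketch, assuming the class is importable from `transformers` and that a 4D uint8 array of shape (frames, H, W, C) is an accepted video input:

```python
# Hedged sketch: the class defaults above resize to 384x384, rescale by
# 1/255 and normalize with CLIP statistics.
import numpy as np
from transformers import LlavaOnevisionVideoProcessor

video = np.zeros((8, 480, 640, 3), dtype=np.uint8)  # (frames, H, W, C)
processor = LlavaOnevisionVideoProcessor()
out = processor(videos=[video], return_tensors="pt")
print(out["pixel_values_videos"].shape)  # roughly (1, 8, 3, 384, 384)
```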
2
0
78
5
45
27
8
0.81
1
9
2
0
3
9
3
23
271
21
138
57
93
112
48
16
44
12
3
1
23
3,442
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/configuration_longformer.py
transformers.models.longformer.configuration_longformer.LongformerConfig
from typing import TYPE_CHECKING, Any, Optional, Union from ...configuration_utils import PretrainedConfig class LongformerConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It is used to instantiate a Longformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongFormer [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence length of 4,096. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. attention_window (`int` or `list[int]`, *optional*, defaults to 512): Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a different window size for each layer, use a `list[int]` where `len(attention_window) == num_hidden_layers`. 
Example: ```python >>> from transformers import LongformerConfig, LongformerModel >>> # Initializing a Longformer configuration >>> configuration = LongformerConfig() >>> # Initializing a model from the configuration >>> model = LongformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'longformer' def __init__(self, attention_window: Union[list[int], int]=512, sep_token_id: int=2, pad_token_id: int=1, bos_token_id: int=0, eos_token_id: int=2, vocab_size: int=30522, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, max_position_embeddings: int=512, type_vocab_size: int=2, initializer_range: float=0.02, layer_norm_eps: float=1e-12, onnx_export: bool=False, **kwargs): """Constructs LongformerConfig.""" super().__init__(pad_token_id=pad_token_id, **kwargs) self.attention_window = attention_window self.sep_token_id = sep_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.onnx_export = onnx_export
class LongformerConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It is used to instantiate a Longformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongFormer [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence length of 4,096. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. attention_window (`int` or `list[int]`, *optional*, defaults to 512): Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a different window size for each layer, use a `list[int]` where `len(attention_window) == num_hidden_layers`. 
Example: ```python >>> from transformers import LongformerConfig, LongformerModel >>> # Initializing a Longformer configuration >>> configuration = LongformerConfig() >>> # Initializing a model from the configuration >>> model = LongformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, attention_window: Union[list[int], int]=512, sep_token_id: int=2, pad_token_id: int=1, bos_token_id: int=0, eos_token_id: int=2, vocab_size: int=30522, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, max_position_embeddings: int=512, type_vocab_size: int=2, initializer_range: float=0.02, layer_norm_eps: float=1e-12, onnx_export: bool=False, **kwargs): '''Constructs LongformerConfig.''' pass
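The `attention_window` argument documented above accepts either a single int or a per-layer list; a quick sketch of the list form:

```python
# Per-layer window sizes: the list length must equal num_hidden_layers.
from transformers import LongformerConfig

config = LongformerConfig(num_hidden_layers=4, attention_window=[32, 64, 128, 256])
assert len(config.attention_window) == config.num_hidden_layers
print(config.attention_window)  # [32, 64, 128, 256]
```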
2
2
42
1
40
1
1
1.26
1
5
0
0
1
17
1
1
106
11
42
41
19
53
21
20
19
1
1
0
1
3,443
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/configuration_longformer.py
transformers.models.longformer.configuration_longformer.LongformerOnnxConfig
from ...onnx import OnnxConfig from collections import OrderedDict from typing import TYPE_CHECKING, Any, Optional, Union from collections.abc import Mapping class LongformerOnnxConfig(OnnxConfig): def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: 'Optional[list[PatchingSpec]]'=None): super().__init__(config, task, patching_specs) config.onnx_export = True @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis)]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: outputs = super().outputs if self.task == 'default': outputs['pooler_output'] = {0: 'batch'} return outputs @property def atol_for_validation(self) -> float: """ What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value. """ return 0.0001 @property def default_onnx_opset(self) -> int: return max(super().default_onnx_opset, 14) def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: inputs = super().generate_dummy_inputs(preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair) import torch inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids']) inputs['global_attention_mask'][:, ::2] = 1 return inputs
class LongformerOnnxConfig(OnnxConfig): def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: 'Optional[list[PatchingSpec]]'=None): pass @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def outputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def atol_for_validation(self) -> float: ''' What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value. ''' pass @property def default_onnx_opset(self) -> int: pass def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass
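`generate_dummy_inputs` above marks every second token as global so the exported graph traces both attention paths. The same pattern in isolation (pure torch, no ONNX machinery needed to illustrate it):

```python
import torch

input_ids = torch.randint(0, 100, (2, 8))
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # every other token attends globally
print(global_attention_mask[0])    # tensor([1, 0, 1, 0, 1, 0, 1, 0])
```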
11
1
9
1
7
2
1
0.2
1
7
0
0
6
0
6
6
61
8
44
22
25
9
24
11
16
2
1
1
8
3,444
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py
transformers.models.longformer.convert_longformer_original_pytorch_lightning_to_pytorch.LightningModel
from torch import nn import pytorch_lightning as pl class LightningModel(pl.LightningModule): def __init__(self, model): super().__init__() self.model = model self.num_labels = 2 self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels) def forward(self): pass
class LightningModel(pl.LightningModule): def __init__(self, model): pass def forward(self): pass
3
0
4
0
4
0
1
0.13
1
1
0
0
2
3
2
2
10
1
8
6
5
1
8
6
5
1
1
0
2
3,445
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerAttention
from torch import nn from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer class LongformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.self = LongformerSelfAttention(config, layer_id) self.output = LongformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions) attn_output = self.output(self_outputs[0], hidden_states) outputs = (attn_output,) + self_outputs[1:] return outputs
class LongformerAttention(nn.Module): def __init__(self, config, layer_id=0): pass def prune_heads(self, heads): pass def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False): pass
4
0
15
1
13
1
1
0.05
1
4
2
0
3
3
3
13
47
4
41
20
28
2
22
11
18
2
1
1
4
3,446
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro="\n Base class for Longformer's outputs, with potential hidden states, local and global attentions.\n ") class LongformerBaseModelOutput(ModelOutput): """ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: torch.FloatTensor hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro="\n Base class for Longformer's outputs, with potential hidden states, local and global attentions.\n ") class LongformerBaseModelOutput(ModelOutput): ''' attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
6
1
0
0
0
0
0
0
0
40
5
5
4
4
30
5
4
4
0
1
0
0
3,447
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro="\n Base class for Longformer's outputs that also contains a pooling of the last hidden states.\n ") class LongformerBaseModelOutputWithPooling(ModelOutput): """ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: torch.FloatTensor pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro="\n Base class for Longformer's outputs that also contains a pooling of the last hidden states.\n ") class LongformerBaseModelOutputWithPooling(ModelOutput): ''' pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
5.67
1
0
0
0
0
0
0
0
45
5
6
5
5
34
6
5
5
0
1
0
0
3,448
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerClassificationHead
from torch import nn import torch class LongformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) output = self.out_proj(hidden_states) return output
class LongformerClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, config): pass def forward(self, hidden_states, **kwargs): pass
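A small sketch of the head above on dummy inputs, assuming the class is importable from its module path; the toy config carries only the three fields the head reads:

```python
import torch
from types import SimpleNamespace
from transformers.models.longformer.modeling_longformer import LongformerClassificationHead

config = SimpleNamespace(hidden_size=16, hidden_dropout_prob=0.1, num_labels=3)
head = LongformerClassificationHead(config)
hidden_states = torch.randn(2, 10, 16)   # (batch, seq_len, hidden)
print(head(hidden_states).shape)         # torch.Size([2, 3]) -- uses the <s> token only
```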
3
1
7
0
7
1
1
0.14
1
1
0
0
2
3
2
12
17
2
14
7
11
2
14
7
11
1
1
0
2
3,449
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerEmbeddings
from torch import nn import torch class LongformerEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape)
class LongformerEmbeddings(nn.Module): ''' Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. ''' def __init__(self, config): pass def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): pass def create_position_ids_from_inputs_embeds(self, inputs_embeds): ''' We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor ''' pass
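`create_position_ids_from_input_ids`, which the forward above calls for the `input_ids` path, builds padding-aware position ids. A self-contained sketch that mirrors its cumsum trick (the helper itself lives in the same module; this stand-in is for illustration):

```python
import torch

def position_ids_from_input_ids(input_ids, padding_idx):
    # Count only non-pad tokens, keep pads at padding_idx, offset the rest past it.
    mask = input_ids.ne(padding_idx).int()
    incremental = torch.cumsum(mask, dim=1) * mask
    return incremental.long() + padding_idx

ids = torch.tensor([[0, 5, 7, 1, 1]])        # padding_idx = 1, last two are pads
print(position_ids_from_input_ids(ids, 1))   # tensor([[2, 3, 4, 1, 1]])
```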
4
2
18
3
12
3
3
0.32
1
1
0
0
3
6
3
13
62
12
38
17
34
12
32
17
28
6
1
2
8
3,450
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerEncoder
from torch import nn class LongformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, padding_len=0, output_attentions=False, output_hidden_states=False, return_dict=True): is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_global_attentions = () if output_attentions and is_global_attn else None if head_mask is not None: assert head_mask.size()[0] == len(self.layer), f'The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}.' for idx, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),) if is_global_attn: all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = hidden_states[:, :hidden_states.shape[1] - padding_len] if output_hidden_states: all_hidden_states = tuple((state[:, :state.shape[1] - padding_len] for state in all_hidden_states)) if output_attentions: all_attentions = tuple((state[:, :, :state.shape[2] - padding_len, :] for state in all_attentions)) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None)) return LongformerBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, global_attentions=all_global_attentions)
class LongformerEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, attention_mask=None, head_mask=None, padding_len=0, output_attentions=False, output_hidden_states=False, return_dict=True): pass
3
0
44
5
36
4
9
0.11
1
6
2
0
2
3
2
12
90
11
72
23
60
8
36
14
33
16
1
3
17
3,451
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerForMaskedLM
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import ModelOutput, auto_docstring, logging import torch from typing import Optional, Union @auto_docstring class LongformerForMaskedLM(LongformerPreTrainedModel): _tied_weights_keys = ['lm_head.decoder'] def __init__(self, config): super().__init__(config) self.longformer = LongformerModel(config, add_pooling_layer=False) self.lm_head = LongformerLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerMaskedLMOutput]: """ global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example Mask filling: ```python >>> from transformers import AutoTokenizer, LongformerForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096") >>> model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") ``` Let's try a very long input. ```python >>> TXT = ( ... "My friends are <mask> but they eat too many carbs." ... + " That's why I decide not to eat with them." * 300 ... 
) >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['healthy', 'skinny', 'thin', 'good', 'vegetarian'] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.longformer(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(prediction_scores.device) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return LongformerMaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions)
@auto_docstring class LongformerForMaskedLM(LongformerPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerMaskedLMOutput]: ''' global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example Mask filling: ```python >>> from transformers import AutoTokenizer, LongformerForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096") >>> model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") ``` Let's try a very long input. ```python >>> TXT = ( ... "My friends are <mask> but they eat too many carbs." ... + " That's why I decide not to eat with them." * 300 ... ) >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['healthy', 'skinny', 'thin', 'good', 'vegetarian'] ``` ''' pass
7
1
24
4
13
7
2
0.51
1
6
3
0
4
2
4
5
105
19
57
28
37
29
25
14
20
5
2
1
8
3,452
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerForMultipleChoice
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn import torch from ...utils import ModelOutput, auto_docstring, logging from typing import Optional, Union @auto_docstring class LongformerForMultipleChoice(LongformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.longformer = LongformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerMultipleChoiceModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) global_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] return_dict = return_dict if return_dict is not None else self.config.use_return_dict if global_attention_mask is None and input_ids is not None: logger.warning_once('Initializing global attention on multiple choice...') global_attention_mask = torch.stack([_compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False) for i in range(num_choices)], dim=1) flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_global_attention_mask = global_attention_mask.view(-1, global_attention_mask.size(-1)) if global_attention_mask is not None else None flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.longformer(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, global_attention_mask=flat_global_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(reshaped_logits.device) loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return LongformerMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions)
@auto_docstring class LongformerForMultipleChoice(LongformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerMultipleChoiceModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) global_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. ''' pass
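The flattening in the forward above, in isolation: each choice becomes its own row before the shared encoder runs, and logits fold back to `(batch, num_choices)` for the cross-entropy loss.

```python
import torch

batch, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))
flat = input_ids.view(-1, input_ids.size(-1))   # (8, 16): one row per choice
logits = torch.randn(flat.size(0), 1)           # classifier output per flattened row
reshaped = logits.view(-1, num_choices)         # (2, 4): one score per choice
print(flat.shape, reshaped.shape)
```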
5
1
48
5
38
5
7
0.11
1
6
2
0
2
3
2
3
105
11
85
36
61
9
33
20
30
13
2
1
14
3,453
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerForQuestionAnswering
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn import torch from ...utils import ModelOutput, auto_docstring, logging from typing import Optional, Union @auto_docstring class LongformerForQuestionAnswering(LongformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerQuestionAnsweringModelOutput]: """ global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). Examples: ```python >>> from transformers import AutoTokenizer, LongformerForQuestionAnswering >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> encoding = tokenizer(question, text, return_tensors="pt") >>> input_ids = encoding["input_ids"] >>> # default is local attention everywhere >>> # the forward method will automatically set global attention on question tokens >>> attention_mask = encoding["attention_mask"] >>> outputs = model(input_ids, attention_mask=attention_mask) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) >>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1] >>> answer = tokenizer.decode( ... tokenizer.convert_tokens_to_ids(answer_tokens) ... ) # remove space prepending space token ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if global_attention_mask is None: if input_ids is None: logger.warning('It is not possible to automatically generate the `global_attention_mask` because input_ids is None. Please make sure that it is correctly set.') else: global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id) outputs = self.longformer(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return LongformerQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions)
@auto_docstring class LongformerForQuestionAnswering(LongformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerQuestionAnsweringModelOutput]: ''' global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). Examples: ```python >>> from transformers import AutoTokenizer, LongformerForQuestionAnswering >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> encoding = tokenizer(question, text, return_tensors="pt") >>> input_ids = encoding["input_ids"] >>> # default is local attention everywhere >>> # the forward method will automatically set global attention on question tokens >>> attention_mask = encoding["attention_mask"] >>> outputs = model(input_ids, attention_mask=attention_mask) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) >>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1] >>> answer = tokenizer.decode( ... tokenizer.convert_tokens_to_ids(answer_tokens) ... ) # remove space prepending space token ```''' pass
5
1
62
9
36
18
5
0.47
1
5
2
0
2
3
2
3
128
19
74
31
55
35
36
16
33
9
2
2
10
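The question-answering forward above falls back to `_compute_global_attention_mask(input_ids, self.config.sep_token_id)` when no `global_attention_mask` is passed, so the question tokens get global attention automatically. A minimal sketch of that idea, assuming the question is everything strictly before the first separator token (the function name here is illustrative, not the library's):

```python
import torch

def global_attention_on_question_sketch(input_ids: torch.Tensor, sep_token_id: int) -> torch.Tensor:
    # Give global attention (1) to every token strictly before the first
    # separator -- i.e. the question tokens -- and local attention (0) elsewhere.
    before_first_sep = (input_ids == sep_token_id).long().cumsum(dim=1) == 0
    return before_first_sep.long()
```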
3454
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerForSequenceClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import ModelOutput, auto_docstring, logging import torch from typing import Optional, Union @auto_docstring(custom_intro='\n Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class LongformerForSequenceClassification(LongformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.longformer = LongformerModel(config, add_pooling_layer=False) self.classifier = LongformerClassificationHead(config) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerSequenceClassifierOutput]: """ global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if global_attention_mask is None: logger.warning_once('Initializing global attention on CLS token...') global_attention_mask = torch.zeros_like(input_ids) global_attention_mask[:, 0] = 1 outputs = self.longformer(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return LongformerSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions)
@auto_docstring(custom_intro='\n Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class LongformerForSequenceClassification(LongformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerSequenceClassifierOutput]: ''' global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
45
5
36
4
7
0.1
1
7
3
0
2
4
2
3
99
10
81
27
57
8
38
13
35
13
2
3
14
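The `problem_type` dispatch in the sequence-classification forward above picks between regression, single-label, and multi-label losses. A self-contained sketch of the same logic (function and parameter names are illustrative):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def sequence_classification_loss_sketch(logits, labels, num_labels, problem_type=None):
    # Infer the problem type the way the forward above does when the config
    # leaves it unset.
    if problem_type is None:
        if num_labels == 1:
            problem_type = "regression"
        elif labels.dtype in (torch.long, torch.int):
            problem_type = "single_label_classification"
        else:
            problem_type = "multi_label_classification"
    if problem_type == "regression":
        loss_fct = MSELoss()
        return loss_fct(logits.squeeze(), labels.squeeze()) if num_labels == 1 else loss_fct(logits, labels)
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)
```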
3455
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerForTokenClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn import torch from ...utils import ModelOutput, auto_docstring, logging from typing import Optional, Union @auto_docstring class LongformerForTokenClassification(LongformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerTokenClassifierOutput]: """ global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.longformer(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return LongformerTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions)
@auto_docstring class LongformerForTokenClassification(LongformerPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerTokenClassifierOutput]: ''' global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
5
1
33
5
26
3
3
0.08
1
5
2
0
2
4
2
3
79
10
64
27
37
5
23
13
20
5
2
1
6
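A short usage sketch for the token-classification class above. The checkpoint name follows the examples elsewhere in this file; its token-classification head is randomly initialized, so the predictions are illustrative only:

```python
import torch
from transformers import AutoTokenizer, LongformerForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerForTokenClassification.from_pretrained("allenai/longformer-base-4096")

inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, seq_len, num_labels)
predicted_class_ids = logits.argmax(dim=-1)
```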
3456
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerIntermediate
import torch from ...activations import ACT2FN, gelu from torch import nn class LongformerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class LongformerIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
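The intermediate block above is a position-wise expansion followed by the configured activation; an equivalent stand-alone computation, assuming made-up base-model sizes and `hidden_act == "gelu"`:

```python
import torch
from torch import nn

hidden_size, intermediate_size = 768, 3072  # assumed typical base-model sizes
dense = nn.Linear(hidden_size, intermediate_size)

x = torch.randn(2, 16, hidden_size)
out = nn.functional.gelu(dense(x))  # matches intermediate_act_fn for "gelu"
```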
3457
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerLMHead
from torch import nn from ...activations import ACT2FN, gelu import torch class LongformerLMHead(nn.Module): """Longformer Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) x = self.decoder(x) return x def _tie_weights(self): if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias
class LongformerLMHead(nn.Module): '''Longformer Head for masked language modeling.''' def __init__(self, config): pass def forward(self, features, **kwargs): pass def _tie_weights(self): pass
4
1
8
1
6
1
1
0.21
1
1
0
0
3
4
3
13
29
6
19
9
15
4
18
9
14
2
1
1
4
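The LM head above maps hidden states back to vocabulary logits via dense → GELU → LayerNorm → decoder. A stand-alone sketch of the same pipeline, with assumed RoBERTa-style sizes:

```python
import torch
from torch import nn

hidden_size, vocab_size = 768, 50265  # assumed sizes, for illustration
dense = nn.Linear(hidden_size, hidden_size)
layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
decoder = nn.Linear(hidden_size, vocab_size)

features = torch.randn(2, 16, hidden_size)
logits = decoder(layer_norm(nn.functional.gelu(dense(features))))  # (2, 16, vocab_size)
```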
3458
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerLayer
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...modeling_layers import GradientCheckpointingLayer class LongformerLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.attention = LongformerAttention(config, layer_id) self.intermediate = LongformerIntermediate(config) self.output = LongformerOutput(config) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False): self_attn_outputs = self.attention(hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions) attn_output = self_attn_outputs[0] outputs = self_attn_outputs[1:] layer_output = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output) outputs = (layer_output,) + outputs return outputs def ff_chunk(self, attn_output): intermediate_output = self.intermediate(attn_output) layer_output = self.output(intermediate_output, attn_output) return layer_output
class LongformerLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): pass def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False): pass def ff_chunk(self, attn_output): pass
4
0
13
0
12
0
1
0
1
4
3
0
3
5
3
13
41
3
38
24
25
0
19
15
15
1
1
0
3
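`apply_chunking_to_forward` in the layer above slices the sequence dimension so the feed-forward runs on one chunk at a time, trading a little speed for lower peak memory. A minimal re-implementation of the idea (names are illustrative):

```python
import torch

def apply_chunking_sketch(forward_fn, chunk_size, seq_len_dim, tensor):
    # chunk_size == 0 disables chunking, mirroring the library behaviour.
    if chunk_size == 0:
        return forward_fn(tensor)
    chunks = tensor.split(chunk_size, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)
```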
3459
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerMaskedLMOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Base class for masked language models outputs.\n ') class LongformerMaskedLMOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for masked language models outputs.\n ') class LongformerMaskedLMOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
5.33
1
0
0
0
0
0
0
0
43
5
6
6
5
32
6
6
5
0
1
0
0
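Per the layout documented above, each token's local-attention row has `x` global slots followed by `attention_window + 1` relative slots, and the weight a token assigns to itself sits at index `x + attention_window / 2`. A small sketch of reading it (tensor contents are random placeholders):

```python
import torch

batch, heads, seq_len = 1, 12, 512
attention_window, num_global = 256, 2  # num_global plays the role of `x` above

local_attn = torch.rand(batch, heads, seq_len, num_global + attention_window + 1)
self_weight_idx = num_global + attention_window // 2
self_weights = local_attn[..., self_weight_idx]  # (batch, heads, seq_len)
```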
3460
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerModel
from torch import nn from ...utils import ModelOutput, auto_docstring, logging import torch from typing import Optional, Union @auto_docstring class LongformerModel(LongformerPreTrainedModel): """ This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in [Longformer: the Long-Document Transformer](https://huggingface.co/papers/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute. The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient. """ def __init__(self, config, add_pooling_layer=True): """ add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config if isinstance(config.attention_window, int): assert config.attention_window % 2 == 0, '`config.attention_window` has to be an even value' assert config.attention_window > 0, '`config.attention_window` has to be positive' config.attention_window = [config.attention_window] * config.num_hidden_layers else: assert len(config.attention_window) == config.num_hidden_layers, f'`len(config.attention_window)` should equal `config.num_hidden_layers`. Expected {config.num_hidden_layers}, given {len(config.attention_window)}' self.embeddings = LongformerEmbeddings(config) self.encoder = LongformerEncoder(config) self.pooler = LongformerPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _pad_to_window_size(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int): """A helper function to pad tokens and mask to work with implementation of Longformer self-attention.""" attention_window = self.config.attention_window if isinstance(self.config.attention_window, int) else max(self.config.attention_window) assert attention_window % 2 == 0, f'`attention_window` should be an even value. 
Given {attention_window}' input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (attention_window - seq_len % attention_window) % attention_window if padding_len > 0: logger.warning_once(f'Input ids are automatically padded to be a multiple of `config.attention_window`: {attention_window}') if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full((batch_size, padding_len), self.config.pad_token_id, dtype=torch.long) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad(attention_mask, (0, padding_len), value=0) token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) return (padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds) def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): if attention_mask is not None: attention_mask = attention_mask * (global_attention_mask + 1) else: attention_mask = global_attention_mask + 1 return attention_mask @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerBaseModelOutputWithPooling]: """ global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). Examples: ```python >>> import torch >>> from transformers import LongformerModel, AutoTokenizer >>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096") >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096") >>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000) # long input document >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 >>> attention_mask = torch.ones( ... input_ids.shape, dtype=torch.long, device=input_ids.device ... ) # initialize to local attention >>> global_attention_mask = torch.zeros( ... input_ids.shape, dtype=torch.long, device=input_ids.device ... ) # initialize to global attention to be deactivated for all tokens >>> global_attention_mask[ ... :, ... [ ... 1, ... 4, ... 21, ... ], ... 
] = 1 # Set global attention to random tokens for the sake of this example >>> # Usually, set global attention based on the task. For example, >>> # classification: the <s> token >>> # QA: question tokens >>> # LM: potentially on the beginning of sentences and paragraphs >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) >>> sequence_output = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if global_attention_mask is not None: attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask) padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[:, 0, 0, :] embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, padding_len=padding_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return LongformerBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, global_attentions=encoder_outputs.global_attentions)
@auto_docstring class LongformerModel(LongformerPreTrainedModel): ''' This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in [Longformer: the Long-Document Transformer](https://huggingface.co/papers/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute. The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient. ''' def __init__(self, config, add_pooling_layer=True): ''' add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass def _pad_to_window_size(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int): '''A helper function to pad tokens and mask to work with implementation of Longformer self-attention.''' pass def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): pass @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LongformerBaseModelOutputWithPooling]: ''' global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). Examples: ```python >>> import torch >>> from transformers import LongformerModel, AutoTokenizer >>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096") >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096") >>> SAMPLE_TEXT = " ".join(["Hello world! 
"] * 1000) # long input document >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 >>> attention_mask = torch.ones( ... input_ids.shape, dtype=torch.long, device=input_ids.device ... ) # initialize to local attention >>> global_attention_mask = torch.zeros( ... input_ids.shape, dtype=torch.long, device=input_ids.device ... ) # initialize to global attention to be deactivated for all tokens >>> global_attention_mask[ ... :, ... [ ... 1, ... 4, ... 21, ... ], ... ] = 1 # Set global attention to random tokens for the sake of this example >>> # Usually, set global attention based on the task. For example, >>> # classification: the <s> token >>> # QA: question tokens >>> # LM: potentially on the beginning of sentences and paragraphs >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) >>> sequence_output = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output ``` ''' pass
10
5
30
4
20
7
4
0.46
1
9
4
0
7
4
7
8
235
34
140
48
110
64
70
27
62
13
2
2
29
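Two small pieces of arithmetic drive the model above: `_merge_to_attention_mask` folds the local and global masks into 0/1/2 values, and `_pad_to_window_size` pads the sequence to a multiple of `attention_window`. A stand-alone illustration of both:

```python
import torch

attention_window = 512
seq_len = 700
# Same padding arithmetic as _pad_to_window_size: here 324 extra tokens.
padding_len = (attention_window - seq_len % attention_window) % attention_window

attention_mask = torch.ones(1, seq_len, dtype=torch.long)
global_attention_mask = torch.zeros(1, seq_len, dtype=torch.long)
global_attention_mask[:, 0] = 1  # e.g. global attention on <s>

# Same merge as _merge_to_attention_mask: 0 = masked, 1 = local, 2 = global
merged = attention_mask * (global_attention_mask + 1)
```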
3461
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Base class for outputs of multiple choice Longformer models.\n ') class LongformerMultipleChoiceModelOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of multiple choice Longformer models.\n ') class LongformerMultipleChoiceModelOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
5.5
1
0
0
0
0
0
0
0
45
6
6
6
5
33
6
6
5
0
1
0
0
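The logits above have shape `(batch_size, num_choices)` because multiple-choice models flatten the choice dimension before the encoder and restore it afterwards. A sketch of that reshaping (the classifier output is a random stand-in):

```python
import torch

batch_size, num_choices, seq_len = 2, 4, 128
input_ids = torch.randint(0, 1000, (batch_size, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, seq_len)             # (batch * choices, seq_len)
flat_logits = torch.randn(batch_size * num_choices, 1)   # stand-in classifier output
reshaped_logits = flat_logits.view(-1, num_choices)      # (batch_size, num_choices)
```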
3462
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerOutput
from torch import nn import torch class LongformerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class LongformerOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
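The output block above is the standard projection → dropout → residual-add → LayerNorm pattern; spelled out with assumed sizes:

```python
import torch
from torch import nn

hidden_size, intermediate_size = 768, 3072  # assumed sizes
dense = nn.Linear(intermediate_size, hidden_size)
layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
dropout = nn.Dropout(0.1)

input_tensor = torch.randn(2, 16, hidden_size)          # residual branch
hidden_states = torch.randn(2, 16, intermediate_size)   # feed-forward branch
out = layer_norm(dropout(dense(hidden_states)) + input_tensor)
```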
3463
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerPooler
from torch import nn import torch class LongformerPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class LongformerPooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
3464
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerPreTrainedModel
from torch import nn from .configuration_longformer import LongformerConfig from ...utils import ModelOutput, auto_docstring, logging from ...modeling_utils import PreTrainedModel @auto_docstring class LongformerPreTrainedModel(PreTrainedModel): config: LongformerConfig base_model_prefix = 'longformer' supports_gradient_checkpointing = True _no_split_modules = ['LongformerSelfAttention'] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
@auto_docstring class LongformerPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
15
0
12
3
6
0.41
1
0
0
6
1
0
1
1
26
2
17
6
15
7
15
6
13
6
1
2
6
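`_init_weights` above is applied recursively over submodules during `post_init`. A sketch of how such an initializer is driven with `nn.Module.apply`, assuming `initializer_range=0.02`:

```python
import torch
from torch import nn

def init_weights_sketch(module, initializer_range=0.02):
    # Mirrors the branches above: normal init for Linear/Embedding weights,
    # zeroed biases and padding rows, ones/zeros for LayerNorm.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
model.apply(init_weights_sketch)  # apply() walks every submodule
```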
3465
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Base class for outputs of question answering Longformer models.\n ') class LongformerQuestionAnsweringModelOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None start_logits: Optional[torch.FloatTensor] = None end_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of question answering Longformer models.\n ') class LongformerQuestionAnsweringModelOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
4.86
1
0
0
0
0
0
0
0
46
5
7
7
6
34
7
7
6
0
1
0
0
3466
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerSelfAttention
from torch import nn import math import torch class LongformerSelfAttention(nn.Module): def __init__(self, config, layer_id): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_heads = config.num_attention_heads self.head_dim = int(config.hidden_size / config.num_attention_heads) self.embed_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.embed_dim) self.key = nn.Linear(config.hidden_size, self.embed_dim) self.value = nn.Linear(config.hidden_size, self.embed_dim) self.query_global = nn.Linear(config.hidden_size, self.embed_dim) self.key_global = nn.Linear(config.hidden_size, self.embed_dim) self.value_global = nn.Linear(config.hidden_size, self.embed_dim) self.dropout = config.attention_probs_dropout_prob self.layer_id = layer_id attention_window = config.attention_window[self.layer_id] assert attention_window % 2 == 0, f'`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}' assert attention_window > 0, f'`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}' self.one_sided_attn_window_size = attention_window // 2 self.config = config def forward(self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False): """ [`LongformerSelfAttention`] expects *len(hidden_states)* to be multiple of *attention_window*. Padding to *attention_window* happens in [`LongformerModel.forward`] to avoid redoing the padding on each layer. The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to: - -10000: no attention - 0: local attention - +10000: global attention """ hidden_states = hidden_states.transpose(0, 1) query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert embed_dim == self.embed_dim, f'hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}' query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) attn_scores = self._sliding_chunks_query_key_matmul(query_vectors, key_vectors, self.one_sided_attn_window_size) remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min) diagonal_mask = self._sliding_chunks_query_key_matmul(float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size) attn_scores += diagonal_mask assert list(attn_scores.size()) == [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], f'local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}' if is_global_attn: max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero = self._get_global_attn_indices(is_index_global_attn) global_key_attn_scores = self._concat_with_global_key_attn_probs(query_vectors=query_vectors, 
key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero) attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) del global_key_attn_scores attn_probs = nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_heads,), f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}' attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) attn_probs = attn_probs.type_as(attn_scores) del attn_scores attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) if is_global_attn: attn_output = self._compute_attn_output_with_global_indices(value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero) else: attn_output = self._sliding_chunks_matmul_attn_probs_value(attn_probs, value_vectors, self.one_sided_attn_window_size) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), 'Unexpected size' attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() if is_global_attn: global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, layer_head_mask=layer_head_mask, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked) nonzero_global_attn_output = global_attn_output[is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]] attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(len(is_local_index_global_attn_nonzero[0]), -1) attn_probs[is_index_global_attn_nonzero] = 0 outputs = (attn_output.transpose(0, 1),) if output_attentions: outputs += (attn_probs,) return outputs + (global_attn_probs,) if is_global_attn and output_attentions else outputs @staticmethod def _pad_and_transpose_last_two_dims(hidden_states_padded, padding): """pads rows and then flips rows and columns""" hidden_states_padded = nn.functional.pad(hidden_states_padded, padding) hidden_states_padded = hidden_states_padded.view(*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)) return hidden_states_padded @staticmethod def _pad_and_diagonalize(chunked_hidden_states): """ shift every row 1 step right, converting columns into diagonals. 
Example: ```python chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629, ] window_overlap = num_rows = 4 ``` (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] """ total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = nn.functional.pad(chunked_hidden_states, (0, window_overlap + 1)) chunked_hidden_states = chunked_hidden_states.view(total_num_heads, num_chunks, -1) chunked_hidden_states = chunked_hidden_states[:, :, :-window_overlap] chunked_hidden_states = chunked_hidden_states.view(total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim) chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] return chunked_hidden_states @staticmethod def _chunk(hidden_states, window_overlap, onnx_export: bool=False): """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" if not onnx_export: hidden_states = hidden_states.view(hidden_states.size(0), torch.div(hidden_states.size(1), window_overlap * 2, rounding_mode='trunc'), window_overlap * 2, hidden_states.size(2)) chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = list(hidden_states.stride()) chunk_stride[1] = chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) chunk_size = [hidden_states.size(0), torch.div(hidden_states.size(1), window_overlap, rounding_mode='trunc') - 1, window_overlap * 2, hidden_states.size(2)] overlapping_chunks = torch.empty(chunk_size, device=hidden_states.device) for chunk in range(chunk_size[1]): overlapping_chunks[:, chunk, :, :] = hidden_states[:, chunk * window_overlap:chunk * window_overlap + 2 * window_overlap, :] return overlapping_chunks @staticmethod def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, :affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) input_tensor[:, :affected_seq_len, :, :affected_seq_len + 1] = torch.full_like(beginning_input, -float('inf')).where(beginning_mask.bool(), beginning_input) ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask = ending_mask.expand(ending_input.size()) input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] = torch.full_like(ending_input, -float('inf')).where(ending_mask.bool(), ending_input) def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): """ Matrix multiplication of query and key tensors using with a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap of size window_overlap """ batch_size, seq_len, num_heads, head_dim = query.size() assert seq_len % (window_overlap * 2) == 0, f'Sequence length should be multiple of {window_overlap * 2}. 
Given {seq_len}' assert query.size() == key.size() chunks_count = torch.div(seq_len, window_overlap, rounding_mode='trunc') - 1 query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) query = self._chunk(query, window_overlap, getattr(self.config, 'onnx_export', False)) key = self._chunk(key, window_overlap, getattr(self.config, 'onnx_export', False)) diagonal_chunked_attention_scores = torch.einsum('bcxd,bcyd->bcxy', (query, key)) diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)) diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros((batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)) diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[:, :, :window_overlap, :window_overlap + 1] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[:, -1, window_overlap:, :window_overlap + 1] diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[:, :, -(window_overlap + 1):-1, window_overlap + 1:] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[:, 0, :window_overlap - 1, 1 - window_overlap:] diagonal_attention_scores = diagonal_attention_scores.view(batch_size, num_heads, seq_len, 2 * window_overlap + 1).transpose(2, 1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn_probs_value(self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int): """ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the same shape as `attn_probs` """ batch_size, seq_len, num_heads, head_dim = value.size() assert seq_len % (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = torch.div(seq_len, window_overlap, rounding_mode='trunc') - 1 chunked_attn_probs = attn_probs.transpose(1, 2).reshape(batch_size * num_heads, torch.div(seq_len, window_overlap, rounding_mode='trunc'), window_overlap, 2 * window_overlap + 1) value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1) chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride = padded_value.stride() chunked_value_stride = (chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2]) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum('bcwd,bcdh->bcwh', (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) @staticmethod def _get_global_attn_indices(is_index_global_attn): """compute global attn indices required throughout forward pass""" num_global_attn_indices = is_index_global_attn.long().sum(dim=1) max_num_global_attn_indices = num_global_attn_indices.max() is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True) is_local_index_global_attn = torch.arange(max_num_global_attn_indices, device=is_index_global_attn.device) < num_global_attn_indices.unsqueeze(dim=-1) is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True) is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True) return (max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero) def _concat_with_global_key_attn_probs(self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero): batch_size = key_vectors.shape[0] key_vectors_only_global = key_vectors.new_zeros(batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] attn_probs_from_global_key = torch.einsum('blhd,bshd->blhs', (query_vectors, key_vectors_only_global)) attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) attn_probs_from_global_key[is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :] = torch.finfo(attn_probs_from_global_key.dtype).min attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) return attn_probs_from_global_key def _compute_attn_output_with_global_indices(self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero): batch_size = attn_probs.shape[0] attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) value_vectors_only_global = value_vectors.new_zeros(batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim) value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] attn_output_only_global 
= torch.matmul(attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone()).transpose(1, 2) attn_probs_without_global = attn_probs.narrow(-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices).contiguous() attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(attn_probs_without_global, value_vectors, self.one_sided_attn_window_size) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output_from_hidden(self, hidden_states, max_num_global_attn_indices, layer_head_mask, is_local_index_global_attn_nonzero, is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, is_index_masked): seq_len, batch_size = hidden_states.shape[:2] global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[is_index_global_attn_nonzero[::-1]] global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) global_query_vectors_only_global /= math.sqrt(self.head_dim) global_query_vectors_only_global = global_query_vectors_only_global.contiguous().view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim).transpose(0, 1) global_key_vectors = global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) global_value_vectors = global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert list(global_attn_scores.size()) == [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], f'global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}.' 
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores = global_attn_scores.transpose(1, 2) global_attn_scores[is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :] = torch.finfo(global_attn_scores.dtype).min global_attn_scores = global_attn_scores.transpose(1, 2) global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None, :], torch.finfo(global_attn_scores.dtype).min) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) global_attn_probs_float = nn.functional.softmax(global_attn_scores, dim=-1, dtype=torch.float32) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_heads,), f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}' global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_probs_float = global_attn_probs_float.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) global_attn_probs = nn.functional.dropout(global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training) global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert list(global_attn_output.size()) == [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], f'global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}.' global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_output = global_attn_output.view(batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim) return (global_attn_output, global_attn_probs)
null
18
7
51
8
33
11
2
0.33
1
8
0
0
7
13
12
22
633
101
407
131
354
135
191
88
178
7
1
1
22
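The attention implementation in the record above leans on the overlapping-chunk view built by `_chunk`: a sequence is reinterpreted as chunks of length `2*w` that overlap by `w` positions using stride arithmetic alone, so no data is copied. Below is a minimal standalone sketch of that trick; the helper name `chunk_with_overlap` and the toy tensor are ours for illustration, mirroring the non-ONNX branch of the source.

```python
# Minimal sketch of the overlapping-chunk view used by `_chunk` above
# (hypothetical helper name; follows the non-ONNX branch of the source code).
import torch

def chunk_with_overlap(hidden_states: torch.Tensor, window_overlap: int) -> torch.Tensor:
    """View (batch, seq_len, dim) as (batch, n_chunks, 2*w, dim), chunks overlapping by w."""
    batch, seq_len, dim = hidden_states.size()
    assert seq_len % (window_overlap * 2) == 0, "seq_len must be a multiple of 2*w"
    # Start from non-overlapping chunks of size 2*w ...
    hidden_states = hidden_states.view(batch, seq_len // (window_overlap * 2), window_overlap * 2, dim)
    # ... then halve the chunk stride, so consecutive chunks share w positions.
    size = list(hidden_states.size())
    size[1] = size[1] * 2 - 1
    stride = list(hidden_states.stride())
    stride[1] = stride[1] // 2
    return hidden_states.as_strided(size=size, stride=stride)

x = torch.arange(16.0).view(1, 8, 2)              # batch=1, seq_len=8, dim=2
chunks = chunk_with_overlap(x, window_overlap=2)
print(chunks.shape)       # torch.Size([1, 3, 4, 2]): chunk i starts at position 2*i
print(chunks[0, 1, :, 0]) # positions 2..5 -> tensor([ 4.,  6.,  8., 10.])
```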
3467
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerSelfOutput
from torch import nn import torch class LongformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class LongformerSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
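`LongformerSelfOutput` above is the standard post-attention output block: dense projection, dropout, then LayerNorm over a residual sum. A minimal usage sketch follows, assuming a made-up `TinyConfig` with toy sizes (not part of the source).

```python
# Usage sketch of the dense -> dropout -> residual LayerNorm pattern above;
# `TinyConfig` and all sizes are invented for illustration.
import torch
from torch import nn

class TinyConfig:
    hidden_size = 8
    layer_norm_eps = 1e-12
    hidden_dropout_prob = 0.1

config = TinyConfig()
dense = nn.Linear(config.hidden_size, config.hidden_size)
layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
dropout = nn.Dropout(config.hidden_dropout_prob)

attn_out = torch.randn(2, 4, config.hidden_size)  # output of self-attention
residual = torch.randn(2, 4, config.hidden_size)  # the block's original input
out = layer_norm(dropout(dense(attn_out)) + residual)
print(out.shape)  # torch.Size([2, 4, 8])
```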
3468
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerSequenceClassifierOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Base class for outputs of sentence classification models.\n ') class LongformerSequenceClassifierOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of sentence classification models.\n ') class LongformerSequenceClassifierOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
5.33
1
0
0
0
0
0
0
0
43
5
6
6
5
32
6
6
5
0
1
0
0
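`LongformerSequenceClassifierOutput` above is a `ModelOutput` dataclass, so its fields can be read by attribute, by key, or by index (indexing skips fields left as `None`). A small sketch with dummy tensors (shapes are arbitrary; requires `torch` and `transformers`):

```python
# Hedged sketch: ModelOutput subclasses behave as dataclasses, dicts, and tuples.
import torch
from transformers.models.longformer.modeling_longformer import (
    LongformerSequenceClassifierOutput,
)

out = LongformerSequenceClassifierOutput(
    loss=torch.tensor(0.5),
    logits=torch.randn(2, 3),  # (batch_size, num_labels)
)
print(out.logits.shape)     # attribute access -> torch.Size([2, 3])
print(out["logits"].shape)  # dict-style access
print(out[0])               # index access skips None fields -> the loss tensor
```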
3469
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/modeling_longformer.py
transformers.models.longformer.modeling_longformer.LongformerTokenClassifierOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Base class for outputs of token classification models.\n ') class LongformerTokenClassifierOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of token classification models.\n ') class LongformerTokenClassifierOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. ''' pass
3
1
0
0
0
0
0
5.33
1
0
0
0
0
0
0
0
43
5
6
6
5
32
6
6
5
0
1
0
0
3470
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/tokenization_longformer.py
transformers.models.longformer.tokenization_longformer.LongformerTokenizer
from ...tokenization_utils import AddedToken, PreTrainedTokenizer import os from typing import Optional import json import regex as re class LongformerTokenizer(PreTrainedTokenizer): """ Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import LongformerTokenizer >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (Longformer tokenizer detect beginning of words by the preceding space). 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = dict(self.encoder).copy() vocab.update(self.added_tokens_encoder) return vocab def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of 
tokens (string) in a single string.""" text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Longformer sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. 
token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs)
class LongformerTokenizer(PreTrainedTokenizer): ''' Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import LongformerTokenizer >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (Longformer tokenizer detect beginning of words by the preceding space). 
''' def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): pass @property def vocab_size(self): pass def get_vocab(self): pass def bpe(self, token): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Longformer sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): pass
15
8
18
2
13
4
3
0.61
1
11
0
0
13
9
13
102
325
52
171
71
136
105
122
46
108
9
3
3
39
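The `build_inputs_with_special_tokens` docstring in the record above specifies the layouts `<s> X </s>` and `<s> A </s></s> B </s>`. Here is a standalone sketch of just that logic, using the `<s>`=0 / `</s>`=2 IDs that appear in the docstring's encoding examples; the function name is ours.

```python
# Standalone sketch of the special-token layout described above
# (IDs 0 and 2 match the <s>/</s> examples in the record's docstring).
CLS, SEP = 0, 2

def build_inputs(token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return [CLS] + token_ids_0 + [SEP]                          # <s> X </s>
    return [CLS] + token_ids_0 + [SEP, SEP] + token_ids_1 + [SEP]   # <s> A </s></s> B </s>

print(build_inputs([10, 11]))        # [0, 10, 11, 2]
print(build_inputs([10, 11], [20]))  # [0, 10, 11, 2, 2, 20, 2]
```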
3471
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longformer/tokenization_longformer_fast.py
transformers.models.longformer.tokenization_longformer_fast.LongformerTokenizerFast
from typing import Optional from tokenizers import processors import json from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...tokenization_utils_base import AddedToken, BatchEncoding from .tokenization_longformer import LongformerTokenizer class LongformerTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" Longformer tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import LongformerTokenizerFast >>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. 
add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (Longformer tokenizer detect beginning of words by the preceding space). trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = LongformerTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs) tokenizer_component = 'post_processor' tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) if 'sep' in state: state['sep'] = tuple(state['sep']) if 'cls' in state: state['cls'] = tuple(state['cls']) changes_to_apply = False if state.get('add_prefix_space', add_prefix_space) != add_prefix_space: state['add_prefix_space'] = add_prefix_space changes_to_apply = True if state.get('trim_offsets', trim_offsets) != trim_offsets: state['trim_offsets'] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop('type')) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def mask_token(self) -> str: """ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. """ if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): """ Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Longformer. """ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' 
return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
class LongformerTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" Longformer tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import LongformerTokenizerFast >>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (Longformer tokenizer detect beginning of words by the preceding space). 
trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. ''' def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): pass @property def mask_token(self) -> str: ''' `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. ''' pass @mask_token.setter def mask_token(self, value): ''' Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Longformer. ''' pass def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def _encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass
11
4
17
2
12
3
3
0.83
1
5
1
0
8
1
8
96
229
42
102
45
73
85
57
25
48
8
3
2
20
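In the fast tokenizer record above, the mask token is wrapped as `AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)` so that it absorbs ("eats") the space before it, as the `mask_token` setter docstring explains. A small sketch inspecting such a token; it only constructs the token for illustration and does not register it on a tokenizer.

```python
# Hedged sketch of why the mask token is built with lstrip=True above
# (requires the `tokenizers` library).
from tokenizers import AddedToken

mask = AddedToken("<mask>", lstrip=True, rstrip=False, normalized=False)
# lstrip=True lets the special-token matcher consume the leading whitespace,
# preserving fill-mask behavior for Longformer/RoBERTa-style vocabularies.
print(mask.lstrip, mask.rstrip)  # True False
```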
3472
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/configuration_longt5.py
transformers.models.longt5.configuration_longt5.LongT5Config
from ...configuration_utils import PretrainedConfig class LongT5Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`LongT5Model`]. It is used to instantiate a LongT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5 [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 32128): Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LongT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `LongT5Block`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. local_radius (`int`, *optional*, defaults to 127) Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism. global_block_size (`int`, *optional*, defaults to 16) Length of blocks an input sequence is divided into for a global token representation. Used only for `encoder_attention_type = "transient-global"`. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"relu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`. encoder_attention_type (`string`, *optional*, defaults to `"local"`): Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are supported by LongT5 implementation. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
""" model_type = 'longt5' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', 'head_dim': 'd_kv'} def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, local_radius=127, global_block_size=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, encoder_attention_type='local', use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.local_radius = local_radius self.global_block_size = global_block_size self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.encoder_attention_type = encoder_attention_type self.use_cache = use_cache act_info = self.feed_forward_proj.split('-') self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == 'gated' if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2: raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'") if feed_forward_proj == 'gated-gelu': self.dense_act_fn = 'gelu_new' super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class LongT5Config(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`LongT5Model`]. It is used to instantiate a LongT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5 [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 32128): Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`LongT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `LongT5Block`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. local_radius (`int`, *optional*, defaults to 127): Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism. global_block_size (`int`, *optional*, defaults to 16): Length of blocks an input sequence is divided into for a global token representation. Used only for `encoder_attention_type = "transient-global"`. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"relu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 and the original LongT5 implementation use the `"gated-gelu"` feed forward projection. encoder_attention_type (`string`, *optional*, defaults to `"local"`): Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are supported by the LongT5 implementation. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
''' def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, local_radius=127, global_block_size=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, encoder_attention_type='local', use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs): pass
2
1
64
4
58
2
4
0.76
1
2
0
0
1
19
1
1
126
8
67
48
42
51
30
25
28
4
1
1
4
3,473
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/configuration_longt5.py
transformers.models.longt5.configuration_longt5.LongT5OnnxConfig
from ...onnx import OnnxSeq2SeqConfigWithPast from collections.abc import Mapping class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}} if self.use_past: common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence' common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') return common_inputs @property def default_onnx_opset(self) -> int: return 13
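A small sketch, assuming the `transformers.onnx` utilities are importable in this environment, that inspects the dynamic-axis mapping returned by the `inputs` property above when `use_past` is disabled (the default `task` is left untouched):

```python
from transformers import LongT5Config
from transformers.models.longt5.configuration_longt5 import LongT5OnnxConfig

cfg = LongT5Config()
onnx_cfg = LongT5OnnxConfig(cfg)  # use_past defaults to False here
print(onnx_cfg.inputs)
# Expected, per the branch above for use_past=False:
# input_ids / attention_mask get a dynamic "encoder_sequence" axis, and
# decoder_input_ids / decoder_attention_mask get a dynamic "decoder_sequence" axis.
```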
class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def default_onnx_opset(self) -> int: pass
5
0
10
1
9
0
2
0
1
2
0
0
2
0
2
2
23
3
20
6
15
0
14
4
11
3
1
1
4
3,474
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5Attention
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Any, Optional, Union from .configuration_longt5 import LongT5Config from ...utils.deprecation import deprecate_kwarg from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer import math from torch import nn import torch class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) return values @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" batch_size, seq_length = hidden_states.shape[:2] is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) is_updated = False if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, :key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def prune_heads(self, heads): pass @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, query_length, key_length, device=None, cache_position=None): '''Compute binned relative position bias''' pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None): ''' Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). ''' pass
8
3
44
5
32
8
5
0.26
1
5
1
0
4
17
5
15
226
28
160
67
136
42
113
49
107
16
1
3
26
3,475
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5Block
from typing import Any, Optional, Union from ...modeling_layers import GradientCheckpointingLayer from ...utils.deprecation import deprecate_kwarg from torch import nn import torch class LongT5Block(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == 'local': attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == 'transient-global': attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError(f'For encoder attention mechanism, either `local` or `transient-global` attention type is expected, but got {config.encoder_attention_type}.') self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(LongT5LayerFF(config)) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = cross_attention_outputs[0] if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) attention_outputs = attention_outputs + cross_attention_outputs[1:] hidden_states = self.layer[-1](hidden_states) if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return (hidden_states,) + attention_outputs
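The three identical half-precision guards in `forward` above keep activations finite after each residual add; a standalone sketch of that guard:

```python
import torch

# An activation that overflowed half precision during a residual add:
hidden_states = torch.tensor([1e5, 1.0]).to(torch.float16)  # 1e5 -> inf in fp16
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000  # 65504 - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Values are now pinned near (but safely below) the fp16 maximum.
```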
class LongT5Block(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None): pass
4
0
48
5
41
4
6
0.09
1
8
5
0
2
2
2
12
98
11
82
27
64
7
38
12
35
6
1
2
11
3,476
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5DenseActDense
from torch import nn from .configuration_longt5 import LongT5Config from ...activations import ACT2FN import torch class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states
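A minimal shape-preserving usage sketch of the feed-forward projection above (`d_model -> d_ff -> d_model`), assuming a default `LongT5Config`:

```python
import torch
from transformers import LongT5Config
from transformers.models.longt5.modeling_longt5 import LongT5DenseActDense

cfg = LongT5Config()  # feed_forward_proj="relu" -> dense_act_fn="relu"
ff = LongT5DenseActDense(cfg)
x = torch.randn(2, 5, cfg.d_model)
y = ff(x)  # wi (d_model -> d_ff), act, dropout, wo (d_ff -> d_model)
assert y.shape == x.shape
```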
class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): pass def forward(self, hidden_states): pass
3
0
9
0
9
0
2
0
1
3
1
0
2
4
2
12
20
1
19
7
16
0
15
7
12
2
1
1
3
3,477
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5DenseGatedActDense
from torch import nn from .configuration_longt5 import LongT5Config from ...activations import ACT2FN class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states
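The gated variant computes a GEGLU-style product of an activated branch and a linear branch; a minimal sketch, assuming `feed_forward_proj="gated-gelu"` so that `dense_act_fn` resolves to `"gelu_new"`:

```python
import torch
from transformers import LongT5Config
from transformers.models.longt5.modeling_longt5 import LongT5DenseGatedActDense

cfg = LongT5Config(feed_forward_proj="gated-gelu")
ff = LongT5DenseGatedActDense(cfg)
x = torch.randn(2, 5, cfg.d_model)
y = ff(x)  # act(wi_0(x)) * wi_1(x), then dropout and wo
assert y.shape == x.shape
```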
class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): pass def forward(self, hidden_states): pass
3
0
7
0
7
0
1
0
1
2
1
0
2
5
2
12
16
1
15
10
12
0
15
10
12
1
1
0
2
3,478
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5EncoderModel
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from typing import Any, Optional, Union from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from .configuration_longt5 import LongT5Config from torch import nn import copy import torch @auto_docstring class LongT5EncoderModel(LongT5PreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight'] _keys_to_ignore_on_load_unexpected = ['decoder'] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5 Training](./longt5#training). Example: ```python >>> from transformers import AutoTokenizer, LongT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... 100 * "Studies have shown that owning a dog is good for you ", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return encoder_outputs
@auto_docstring class LongT5EncoderModel(LongT5PreTrainedModel): def __init__(self, config: LongT5Config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5 Training](./longt5#training). Example: ```python >>> from transformers import AutoTokenizer, LongT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... 100 * "Studies have shown that owning a dog is good for you ", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
10
2
10
1
6
3
1
0.38
1
5
3
0
7
2
7
10
79
14
47
25
28
18
28
15
20
2
2
1
10
3,479
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5ForConditionalGeneration
import warnings from torch.nn import CrossEntropyLoss from torch import nn from ...generation import GenerationMixin import copy import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from typing import Any, Optional, Union from .configuration_longt5 import LongT5Config @auto_docstring(custom_intro='\n LONGT5 Model with a `language modeling` head on top.\n ') class LongT5ForConditionalGeneration(LongT5PreTrainedModel, GenerationMixin): _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5 Training](./longt5#training). 
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ... ) >>> # Let's try a very long input. 
>>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to provide an overview of the literature on the role of dog ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * self.model_dim ** (-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return (loss,) + output if loss is not None else output return Seq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels)
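A hedged training-style sketch for the class above; the checkpoint name and example strings are illustrative, and `decoder_input_ids` are derived internally by shifting `labels` right (see `prepare_decoder_input_ids_from_labels` / `_shift_right`):

```python
from transformers import AutoTokenizer, LongT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
model = LongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")

inputs = tokenizer("summarize: studies have shown that owning a dog is good for you", return_tensors="pt")
labels = tokenizer("dogs are good for you", return_tensors="pt").input_ids

# Passing labels triggers the CrossEntropyLoss branch of forward above.
outputs = model(**inputs, labels=labels)
print(outputs.loss)
```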
@auto_docstring(custom_intro='\n LONGT5 Model with a `language modeling` head on top.\n ') class LongT5ForConditionalGeneration(LongT5PreTrainedModel, GenerationMixin): def __init__(self, config: LongT5Config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5 Training](./longt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ... ) >>> # Let's try a very long input. >>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to provide an overview of the literature on the role of dog ```''' pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass
10
1
18
2
12
3
3
0.24
2
8
4
0
11
5
11
14
212
37
141
52
108
34
79
32
67
14
2
2
28
3,480
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LayerCrossAttention
from torch import nn from ...utils.deprecation import deprecate_kwarg from typing import Any, Optional, Union class LongT5LayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] return outputs
class LongT5LayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None): pass
4
0
17
0
17
1
1
0.03
1
4
2
0
2
3
2
12
36
1
35
22
20
1
12
10
9
1
1
0
2
3,481
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LayerFF
from torch import nn from .configuration_longt5 import LongT5Config class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states
class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): pass def forward(self, hidden_states): pass
3
0
7
1
7
0
2
0
1
5
4
0
2
3
2
12
16
2
14
7
11
0
13
7
10
2
1
1
3
3,482
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LayerLocalSelfAttention
from torch import nn from typing import Any, Optional, Union class LongT5LayerLocalSelfAttention(nn.Module): """Local self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs
class LongT5LayerLocalSelfAttention(nn.Module): '''Local self attention used in encoder''' def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any): pass
3
1
13
0
13
1
1
0.12
1
5
2
0
2
3
2
12
29
2
26
17
15
3
12
9
9
1
1
0
2
3,483
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LayerNorm
from torch import nn import torch class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states
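The normalization above is an RMS norm: it rescales by the root mean square without subtracting the mean or adding a bias. A small equivalence check against a hand-computed version:

```python
import torch
from transformers.models.longt5.modeling_longt5 import LongT5LayerNorm

ln = LongT5LayerNorm(4, eps=1e-6)
x = torch.randn(2, 4)
# Hand-computed RMS normalization: no mean subtraction, no bias, scale starts at 1.
manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(ln(x), manual, atol=1e-6)
```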
class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): ''' Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean. ''' pass def forward(self, hidden_states): pass
3
1
11
2
5
4
2
0.73
1
1
0
0
2
2
2
12
23
4
11
6
8
8
11
6
8
2
1
1
3
3,484
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LayerSelfAttention
from torch import nn from typing import Any, Optional, Union from ...utils.deprecation import deprecate_kwarg class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs
class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None): pass
4
0
16
0
16
1
1
0.03
1
4
2
0
2
3
2
12
34
1
33
19
20
1
12
9
9
1
1
0
2
3,485
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LayerTransientGlobalSelfAttention
from torch import nn from typing import Any, Optional, Union class LongT5LayerTransientGlobalSelfAttention(nn.Module): """Transient-Global self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] return outputs
class LongT5LayerTransientGlobalSelfAttention(nn.Module): '''Transient-Global self attention used in encoder''' def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None): pass def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any): pass
3
1
14
0
14
1
1
0.11
1
5
2
0
2
3
2
12
31
2
28
17
17
3
12
9
9
1
1
0
2
3,486
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5LocalAttention
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer import math from torch import nn import torch from .configuration_longt5 import LongT5Config class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool=False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" target_device = self.relative_attention_bias.weight.device if self.relative_attention_bias.weight.device.type != 'meta' else None memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device) context_position = memory_position[block_length:-block_length] relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward(self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) scores = torch.einsum('...qhd,...khd->...hqk', query_states, key_states) if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros((1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None: mask = torch.where(mask > 0, 0.0, -10000000000.0) position_bias = position_bias + mask.transpose(1, 2) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) if layer_head_mask is not None: 
attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum('...hqk,...khd->...qhd', attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs
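The window arithmetic implied by the defaults above, written out: each block of queries attends to the concatenation of the previous, current, and next key blocks.

```python
# Local attention window sizes for the LongT5Config defaults:
local_radius = 127
block_len = local_radius + 1   # 128 queries per block
key_window = 3 * block_len     # prev + current + next blocks = 384 keys per query block
# scores therefore has shape (..., n_heads, block_len, 3 * block_len),
# matching the zero position_bias allocated in forward above.
```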
class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool=False) -> None: pass def prune_heads(self, heads): pass @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, block_length: int): '''Compute binned relative position bias''' pass def forward(self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False): pass def shape(states): '''projection''' pass def unshape(states): '''reshape''' pass
9
4
27
3
18
6
2
0.35
1
5
1
0
4
18
5
15
190
25
124
54
108
43
93
46
85
7
1
3
17
3,487
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5Model
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from typing import Any, Optional, Union from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from .configuration_longt5 import LongT5Config import warnings from torch import nn import copy import torch @auto_docstring class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight'] _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). 
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, LongT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5Model.from_pretrained("google/long-t5-local-base") >>> # Let's try a very long encoder input. >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) hidden_states = encoder_outputs[0] decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
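One detail of the forward signature worth calling out: `encoder_outputs` may be precomputed and passed in, so the (potentially very long) encoder pass runs once for several decoder inputs. A sketch reusing the checkpoint named in the docstring example; the prompts in the loop are illustrative only:

```python
from transformers import AutoTokenizer, LongT5Model

tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
model = LongT5Model.from_pretrained("google/long-t5-local-base")

input_ids = tokenizer(100 * "a long encoder input ", return_tensors="pt").input_ids

# Run the encoder once; forward() skips encoding when encoder_outputs is given.
encoder_outputs = model.get_encoder()(input_ids=input_ids)
for prompt in ["Studies show that", "It follows that"]:
    decoder_input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    outputs = model(encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids)
    print(outputs.last_hidden_state.shape)
```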
@auto_docstring class LongT5Model(LongT5PreTrainedModel): def __init__(self, config: LongT5Config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _tie_weights(self): pass def get_encoder(self): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
Example: ```python >>> from transformers import AutoTokenizer, LongT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5Model.from_pretrained("google/long-t5-local-base") >>> # Let's try a very long encoder input. >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```''' pass
10
2
17
2
13
3
2
0.22
1
8
4
0
8
3
8
11
154
23
107
38
78
24
49
19
40
10
2
2
19
3,488
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5PreTrainedModel
from ...modeling_utils import PreTrainedModel from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from .configuration_longt5 import LongT5Config import torch @auto_docstring class LongT5PreTrainedModel(PreTrainedModel): config: LongT5Config base_model_prefix = 'transformer' supports_gradient_checkpointing = True _no_split_modules = ['LongT5Block'] _can_compile_fullgraph = False @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = {'decoder_input_ids': input_ids, 'input_ids': input_ids, 'decoder_attention_mask': input_mask} return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings): module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi, 'bias') and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_0, 'bias') and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5)) if hasattr(module.wi_1, 'bias') and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5)) if hasattr(module.wo, 'bias') and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError('self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. 
See LongT5 docs for more information.')
        if is_torch_fx_proxy(input_ids):
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id
        if pad_token_id is None:
            raise ValueError('self.model.config.pad_token_id has to be defined.')
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        return shifted_input_ids
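`_shift_right` is the standard teacher-forcing preparation: prepend the decoder start token, drop the last label, and replace `-100` ignore positions with the pad id. A self-contained sketch of the non-FX branch above:

```python
import torch

def shift_right(input_ids: torch.Tensor, decoder_start_token_id: int, pad_token_id: int) -> torch.Tensor:
    # Prepend the start token and drop the last position, as _shift_right does above.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[..., 1:] = input_ids[..., :-1].clone()
    shifted[..., 0] = decoder_start_token_id
    # Labels use -100 for ignored positions; replace them with the pad id.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[42, 7, 1, -100]])
print(shift_right(labels, decoder_start_token_id=0, pad_token_id=0))  # tensor([[ 0, 42,  7,  1]])
```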
@auto_docstring
class LongT5PreTrainedModel(PreTrainedModel):

    @property
    def dummy_inputs(self):
        pass

    def _init_weights(self, module):
        '''Initialize the weights'''
        pass

    def _shift_right(self, input_ids):
        pass
6
1
27
1
22
4
6
0.25
1
10
9
4
3
0
3
3
100
8
75
21
70
19
60
20
56
14
1
3
19
3,489
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5Stack
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from typing import Any, Optional, Union from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from ...modeling_attn_mask_utils import AttentionMaskConverter from torch import nn import torch class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList([LongT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)]) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.gradient_checkpointing = False self.post_init() def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = 'decoder_' if self.is_decoder else '' raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: assert self.embed_tokens is not None, 'You have to initialize the model with valid token embeddings' inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder: causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions) elif self.config.encoder_attention_type == 'local': causal_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: causal_mask = attention_mask if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position) hidden_states = layer_outputs[0] position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, 
all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
""" if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): pass def set_input_embeddings(self, new_embeddings): pass def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
7
1
60
6
46
8
10
0.19
1
16
8
0
5
8
6
9
368
40
279
81
240
52
141
49
134
42
2
3
59
3,490
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/longt5/modeling_longt5.py
transformers.models.longt5.modeling_longt5.LongT5TransientGlobalAttention
from .configuration_longt5 import LongT5Config from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer import math from torch import nn import torch class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool=False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads) self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) max_exact = num_buckets // 2 is_small = relative_position < max_exact relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long) relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" target_device = self.relative_attention_bias.weight.device if self.relative_attention_bias.weight.device.type != 'meta' else None memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device) context_position = memory_position[block_length:-block_length] relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -10000000000.0) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket(side_relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) side_bias = side_bias.permute([0, 3, 1, 2]) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward(self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) block_ids, global_segment_ids = _make_global_fixed_block_ids(mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size) _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) scores = torch.einsum('...qhd,...khd->...hqk', query_states, key_states) if mask is not None: local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -10000000000.0) else: local_attention_mask = None if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros((1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) if mask is None: mask = torch.ones(batch_size, seq_length) side_position_bias = self.compute_side_bias(mask, global_segment_ids) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, 
p=self.dropout, training=self.training)
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask
        attn_weights = attn_weights.type(value_states.dtype)
        attn_output = unshape(torch.einsum('...hqk,...khd->...qhd', attn_weights, value_states))
        attn_output = attn_output[:, :seq_length, :]
        attn_output = self.o(attn_output)
        outputs = (attn_output, position_bias)
        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
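Throughout this class (and the local attention above), masks become large negative additive biases rather than boolean selections, so they can simply be summed with the relative-position biases before the softmax. The convention in isolation:

```python
import torch

scores = torch.randn(1, 4)           # unnormalized attention scores
mask = torch.tensor([[1, 1, 0, 1]])  # 1 = may attend, 0 = blocked

# Same convention as torch.where(mask > 0, 0.0, -10000000000.0) above.
bias = torch.where(mask > 0, 0.0, -10000000000.0)
weights = torch.softmax((scores + bias).float(), dim=-1)
print(weights)  # the blocked position receives (numerically) zero weight
```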
class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool=False) -> None: pass def prune_heads(self, heads): pass @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ''' Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) ''' pass def compute_bias(self, block_length: int): '''Compute binned relative position bias''' pass def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: pass def forward(self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False): pass def shape(states): '''projection''' pass def unshape(states): '''reshape''' pass
10
4
32
4
21
8
3
0.37
1
7
2
0
5
20
6
16
261
33
167
70
150
62
127
62
118
10
1
3
22
3,491
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/configuration_luke.py
transformers.models.luke.configuration_luke.LukeConfig
from ...configuration_utils import PretrainedConfig class LukeConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`LukeModel`]. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LUKE [studio-ousia/luke-base](https://huggingface.co/studio-ousia/luke-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the LUKE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LukeModel`]. entity_vocab_size (`int`, *optional*, defaults to 500000): Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented by the `entity_ids` passed when calling [`LukeModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. entity_emb_size (`int`, *optional*, defaults to 256): The number of dimensions of the entity embedding. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LukeModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_entity_aware_attention (`bool`, *optional*, defaults to `True`): Whether or not the model should use the entity-aware self-attention mechanism proposed in [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention (Yamada et al.)](https://huggingface.co/papers/2010.01057). classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. 
Examples: ```python >>> from transformers import LukeConfig, LukeModel >>> # Initializing a LUKE configuration >>> configuration = LukeConfig() >>> # Initializing a model from the configuration >>> model = LukeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'luke' def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): """Constructs LukeConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.entity_vocab_size = entity_vocab_size self.hidden_size = hidden_size self.entity_emb_size = entity_emb_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_entity_aware_attention = use_entity_aware_attention self.classifier_dropout = classifier_dropout
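Since every documented argument is a keyword with a default, configuration variants are built by overriding only what differs. A short sketch:

```python
from transformers import LukeConfig

# Keep the documented defaults except for the entity embedding width,
# the classification-head dropout, and the attention variant.
config = LukeConfig(entity_emb_size=128, classifier_dropout=0.2, use_entity_aware_attention=False)
print(config.entity_emb_size, config.classifier_dropout, config.use_entity_aware_attention)
```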
class LukeConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`LukeModel`]. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LUKE [studio-ousia/luke-base](https://huggingface.co/studio-ousia/luke-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the LUKE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LukeModel`]. entity_vocab_size (`int`, *optional*, defaults to 500000): Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented by the `entity_ids` passed when calling [`LukeModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. entity_emb_size (`int`, *optional*, defaults to 256): The number of dimensions of the entity embedding. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LukeModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_entity_aware_attention (`bool`, *optional*, defaults to `True`): Whether or not the model should use the entity-aware self-attention mechanism proposed in [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention (Yamada et al.)](https://huggingface.co/papers/2010.01057). classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. 
Examples: ```python >>> from transformers import LukeConfig, LukeModel >>> # Initializing a LUKE configuration >>> configuration = LukeConfig() >>> # Initializing a model from the configuration >>> model = LukeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): '''Constructs LukeConfig.''' pass
2
2
42
1
40
1
1
1.5
1
1
0
0
1
16
1
1
116
11
42
41
18
63
20
19
18
1
1
0
1
3,492
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.BaseLukeModelOutput
from typing import Optional, Union from dataclasses import dataclass from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling import torch from ...utils import ModelOutput, auto_docstring, logging @dataclass @auto_docstring(custom_intro="\n Base class for model's outputs, with potential hidden states and attentions.\n ") class BaseLukeModelOutput(BaseModelOutput): """ entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ entity_last_hidden_state: Optional[torch.FloatTensor] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
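These LUKE output classes are ordinary `ModelOutput` dataclasses: the extra entity fields ride alongside the inherited ones and are read by attribute. A sketch with dummy tensors:

```python
import torch

from transformers.models.luke.modeling_luke import BaseLukeModelOutput

out = BaseLukeModelOutput(
    last_hidden_state=torch.zeros(1, 6, 768),         # word-token states (inherited field)
    entity_last_hidden_state=torch.zeros(1, 2, 768),  # entity-token states (added field)
)
print(out.entity_last_hidden_state.shape)  # torch.Size([1, 2, 768])
```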
@dataclass @auto_docstring(custom_intro="\n Base class for model's outputs, with potential hidden states and attentions.\n ") class BaseLukeModelOutput(BaseModelOutput): ''' entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
3
1
0
0
0
0
0
7
1
0
0
0
0
0
0
0
28
4
3
3
2
21
3
3
2
0
2
0
0
3,493
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.BaseLukeModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, logging from typing import Optional, Union from dataclasses import dataclass from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling import torch @dataclass @auto_docstring(custom_intro='\n Base class for outputs of the LUKE model.\n ') class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): """ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ entity_last_hidden_state: Optional[torch.FloatTensor] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of the LUKE model.\n ') class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): ''' pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
3
1
0
0
0
0
0
7.67
1
0
0
0
0
0
0
0
28
2
3
3
2
23
3
3
2
0
2
0
0
3,494
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.EntityClassificationOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Outputs of entity classification models.\n ') class EntityClassificationOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Outputs of entity classification models.\n ') class EntityClassificationOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
3
1
0
0
0
0
0
3.33
1
0
0
0
0
0
0
0
28
2
6
6
5
20
6
6
5
0
1
0
0
3,495
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.EntityPairClassificationOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Outputs of entity pair classification models.\n ') class EntityPairClassificationOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class_skeleton:
@dataclass
@auto_docstring(custom_intro='\n Outputs of entity pair classification models.\n ')
class EntityPairClassificationOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    '''
    pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 3.33
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 0
CountLine: 28
CountLineBlank: 2
CountLineCode: 6
CountLineCodeDecl: 6
CountLineCodeExe: 5
CountLineComment: 20
CountStmt: 6
CountStmtDecl: 6
CountStmtExe: 5
MaxCyclomatic: 0
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 0

id: 3,496
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.EntityPredictionHead
human_written_code:
import torch
from torch import nn


class EntityPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transform = EntityPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class_skeleton:
class EntityPredictionHead(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3
total_doc_str: 0
AvgCountLine: 6
AvgCountLineBlank: 1
AvgCountLineCode: 5
AvgCountLineComment: 0
AvgCyclomatic: 1
CommentToCodeRatio: 0
CountClassBase: 1
CountClassCoupled: 2
CountClassCoupledModified: 1
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 4
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 13
CountLineBlank: 2
CountLineCode: 11
CountLineCodeDecl: 7
CountLineCodeExe: 8
CountLineComment: 0
CountStmt: 11
CountStmtDecl: 7
CountStmtExe: 8
MaxCyclomatic: 1
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 2
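The head in the record above maps entity hidden states to scores over the entity vocabulary. A shape-level sketch (editorial illustration), assuming the class and the `EntityPredictionHeadTransform` it instantiates are importable from `transformers`, with `types.SimpleNamespace` standing in for a real `LukeConfig`; all field values are arbitrary toy numbers:

```python
from types import SimpleNamespace
import torch
from transformers.models.luke.modeling_luke import EntityPredictionHead

config = SimpleNamespace(
    hidden_size=768,        # width of incoming entity hidden states
    entity_emb_size=256,    # width after the transform
    entity_vocab_size=500,  # one output score per entity vocabulary entry
    hidden_act="gelu",      # consumed by EntityPredictionHeadTransform
    layer_norm_eps=1e-12,
)
head = EntityPredictionHead(config)
scores = head(torch.randn(2, 3, 768))  # (batch, entity_length, hidden_size)
print(scores.shape)  # torch.Size([2, 3, 500])
```

Keeping `self.bias` as a separate parameter rather than using a biased `nn.Linear` leaves the decoder weight bias-free, a common arrangement when a decoder weight may be tied to an embedding matrix.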

id: 3,497
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.EntityPredictionHeadTransform
human_written_code:
from ...activations import ACT2FN, gelu
from torch import nn


class EntityPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
class_skeleton:
class EntityPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3
total_doc_str: 0
AvgCountLine: 7
AvgCountLineBlank: 0
AvgCountLineCode: 7
AvgCountLineComment: 0
AvgCyclomatic: 2
CommentToCodeRatio: 0
CountClassBase: 1
CountClassCoupled: 2
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 3
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 15
CountLineBlank: 1
CountLineCode: 14
CountLineCodeDecl: 6
CountLineCodeExe: 11
CountLineComment: 0
CountStmt: 13
CountStmtDecl: 6
CountStmtExe: 10
MaxCyclomatic: 2
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 3
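The `isinstance(config.hidden_act, str)` branch in the record above means the activation can be supplied either as an `ACT2FN` key or as a callable. A sketch of both paths (editorial illustration), again with a `SimpleNamespace` toy config and arbitrary sizes:

```python
from types import SimpleNamespace
import torch
from transformers.models.luke.modeling_luke import EntityPredictionHeadTransform

base = dict(hidden_size=768, entity_emb_size=256, layer_norm_eps=1e-12)
# String key: looked up in the ACT2FN registry.
by_name = EntityPredictionHeadTransform(SimpleNamespace(hidden_act="gelu", **base))
# Callable: used directly as the activation function.
by_fn = EntityPredictionHeadTransform(SimpleNamespace(hidden_act=torch.nn.functional.relu, **base))

x = torch.randn(2, 4, 768)
print(by_name(x).shape, by_fn(x).shape)  # both torch.Size([2, 4, 256])
```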

id: 3,498
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.EntitySpanClassificationOutput
human_written_code:
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Optional, Union


@dataclass
@auto_docstring(custom_intro='\n Outputs of entity span classification models.\n ')
class EntitySpanClassificationOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class_skeleton:
@dataclass
@auto_docstring(custom_intro='\n Outputs of entity span classification models.\n ')
class EntitySpanClassificationOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    '''
    pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 3.33
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 0
CountLine: 28
CountLineBlank: 2
CountLineCode: 6
CountLineCodeDecl: 6
CountLineCodeExe: 5
CountLineComment: 20
CountStmt: 6
CountStmtDecl: 6
CountStmtExe: 5
MaxCyclomatic: 0
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 0

id: 3,499
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeAttention
human_written_code:
import torch
from torch import nn


class LukeAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = LukeSelfAttention(config)
        self.output = LukeSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        raise NotImplementedError('LUKE does not support the pruning of attention heads')

    def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        word_size = word_hidden_states.size(1)
        self_outputs = self.self(word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions)
        if entity_hidden_states is None:
            concat_self_outputs = self_outputs[0]
            concat_hidden_states = word_hidden_states
        else:
            concat_self_outputs = torch.cat(self_outputs[:2], dim=1)
            concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
        attention_output = self.output(concat_self_outputs, concat_hidden_states)
        word_attention_output = attention_output[:, :word_size, :]
        if entity_hidden_states is None:
            entity_attention_output = None
        else:
            entity_attention_output = attention_output[:, word_size:, :]
        outputs = (word_attention_output, entity_attention_output) + self_outputs[2:]
        return outputs
class_skeleton:
class LukeAttention(nn.Module):
    def __init__(self, config):
        pass

    def prune_heads(self, heads):
        pass

    def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        pass
total_program_units: 4
total_doc_str: 0
AvgCountLine: 14
AvgCountLineBlank: 1
AvgCountLineCode: 12
AvgCountLineComment: 0
AvgCyclomatic: 2
CommentToCodeRatio: 0.03
CountClassBase: 1
CountClassCoupled: 5
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 3
CountDeclInstanceVariable: 3
CountDeclMethod: 3
CountDeclMethodAll: 13
CountLine: 45
CountLineBlank: 6
CountLineCode: 38
CountLineCodeDecl: 22
CountLineCodeExe: 27
CountLineComment: 1
CountStmt: 23
CountStmtDecl: 15
CountStmtExe: 19
MaxCyclomatic: 3
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 5
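`LukeAttention.forward` in the record above lets word and entity tokens attend over one joint sequence, then splits the result back into the two streams. The bookkeeping can be shown with plain tensors (editorial illustration; the `attended = concat` line is a placeholder for the real self-attention and output projection, not the actual modules):

```python
import torch

word_hidden_states = torch.randn(2, 10, 768)   # (batch, word_len, hidden)
entity_hidden_states = torch.randn(2, 3, 768)  # (batch, entity_len, hidden)
word_size = word_hidden_states.size(1)

# Concatenate both streams into one sequence for joint attention ...
concat = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
attended = concat  # placeholder for self-attention + output projection

# ... then split back, exactly as forward() does with attention_output.
word_out = attended[:, :word_size, :]
entity_out = attended[:, word_size:, :]
print(word_out.shape, entity_out.shape)  # (2, 10, 768) (2, 3, 768)
```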