# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl.
"""

import glob
import os
import pickle
import re
from collections import Counter, OrderedDict
from typing import Optional

import numpy as np

from ....tokenization_utils import PreTrainedTokenizer
from ....utils import (
    cached_file,
    check_torch_load_is_safe,
    is_sacremoses_available,
    is_torch_available,
    logging,
    requires_backends,
    strtobool,
    torch_only_method,
)


if is_sacremoses_available():
    import sacremoses as sm

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "pretrained_vocab_file": "vocab.pkl",
    "pretrained_vocab_file_torch": "vocab.bin",
    "vocab_file": "vocab.txt",
}

PRETRAINED_CORPUS_ARCHIVE_MAP = {
    "transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/corpus.bin",
}
CORPUS_NAME = "corpus.bin"

MATCH_NUMBERS = r"(?<=\d)[,.](?=\d)", r" @\g<0>@ "
DETOKENIZE_NUMBERS = [(r" @\,@ ", r","), (r" @\.@ ", r".")]


def tokenize_numbers(text_array: list[str]) -> list[str]:
    """
    Splits large comma-separated numbers and floating point values. This is done by replacing commas with ' @,@ ' and
    dots with ' @.@ '.

    Args:
        text_array: An already tokenized text as list.

    Returns:
        A list of strings with tokenized numbers.

    Example:

    ```python
    >>> tokenize_numbers(["$", "5,000", "1.73", "m"])
    ['$', '5', '@,@', '000', '1', '@.@', '73', 'm']
    ```"""
    tokenized = []
    for i in range(len(text_array)):
        reg, sub = MATCH_NUMBERS
        replaced = re.sub(reg, sub, text_array[i]).split()
        tokenized.extend(replaced)

    return tokenized


def detokenize_numbers(text: str) -> str:
    """
    Inverts the operation of *tokenize_numbers*. This is replacing ' @,@ ' and ' @.@' by ',' and '.'.

    Args:
        text: A string where the numbers should be detokenized.

    Returns:
        A detokenized string.

    Example:

    ```python
    >>> detokenize_numbers("$ 5 @,@ 000 1 @.@ 73 m")
    '$ 5,000 1.73 m'
    ```"""
    for reg, sub in DETOKENIZE_NUMBERS:
        text = re.sub(reg, sub, text)
    return text


class TransfoXLTokenizer(PreTrainedTokenizer):
    """
    Construct a Transformer-XL tokenizer adapted from Vocab class in [the original
    code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no
    sub-word tokenization).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        special (`list[str]`, *optional*):
            A list of special tokens (to be treated by the original implementation of this tokenizer).
        min_freq (`int`, *optional*, defaults to 0):
            The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it
            will be mapped to `unk_token`).
        max_size (`int`, *optional*):
            The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found
            after excluding the tokens according to the `min_freq` rule.
        lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        delimiter (`str`, *optional*):
            The delimiter used between tokens.
        vocab_file (`str`, *optional*):
            File containing the vocabulary (from the original implementation).
        pretrained_vocab_file (`str`, *optional*):
            File containing the vocabulary as saved with the `save_pretrained()` method.
        never_split (`list[str]`, *optional*):
            List of tokens that should never be split. If no list is specified, will simply use the existing special
            tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        eos_token (`str`, *optional*, defaults to `"<eos>"`):
            The end of sequence token.
        additional_special_tokens (`list[str]`, *optional*, defaults to `['<formula>']`):
            A list of additional special tokens (for the HuggingFace functionality).
        language (`str`, *optional*, defaults to `"en"`):
            The language of this tokenizer (used for Moses preprocessing).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids"]

    def __init__(
        self,
        special=None,
        min_freq=0,
        max_size=None,
        lower_case=False,
        delimiter=None,
        vocab_file=None,
        pretrained_vocab_file: Optional[str] = None,
        never_split=None,
        unk_token="<unk>",
        eos_token="<eos>",
        additional_special_tokens=["<formula>"],
        language="en",
        **kwargs,
    ):
        logger.error(
            "`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. "
            "See more details on this model's documentation page: "
            "`https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`."
        )

        requires_backends(self, "sacremoses")

        if special is None:
            special = []
        self.counter = Counter()
        self.special = special
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file
        self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~'
        self.punction_without_space_before_pattern = re.compile(rf"[^\s][{self.punctuation_symbols}]")
        self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern()
        self.language = language
        self.moses_punct_normalizer = sm.MosesPunctNormalizer(language)
        self.moses_tokenizer = sm.MosesTokenizer(language)
        self.moses_detokenizer = sm.MosesDetokenizer(language)
        self.idx2sym = []
        self.sym2idx = OrderedDict()
        # This try... catch... is not beautiful but honestly this tokenizer was not made to be used
        # in a library like ours, at all.
        try:
            vocab_dict = None
            if pretrained_vocab_file is not None:
                # Priority on pickle files (support PyTorch and TF)
                if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
                    raise ValueError(
                        "This part uses `pickle.load` which is insecure and will execute arbitrary code that is "
                        "potentially malicious. It's recommended to never unpickle data that could have come from an "
                        "untrusted source, or that could have been tampered with. If you already verified the pickle "
                        "data and decided to use it, you can set the environment variable "
                        "`TRUST_REMOTE_CODE` to `True` to allow it."
                    )
                with open(pretrained_vocab_file, "rb") as f:
                    vocab_dict = pickle.load(f)

                # Loading a torch-saved transfo-xl vocab dict with pickle results in an integer
                # Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed.
                # We therefore load it with torch, if it's available.
                if isinstance(vocab_dict, int):
                    if not is_torch_available():
                        raise ImportError(
                            "Not trying to load dict with PyTorch as you need to install pytorch to load "
                            "from a PyTorch pretrained vocabulary, "
                            "or activate it with environment variables USE_TORCH=1 and USE_TF=0."
                        )
                    check_torch_load_is_safe()
                    vocab_dict = torch.load(pretrained_vocab_file, weights_only=True)

            if vocab_dict is not None:
                for key, value in vocab_dict.items():
                    if key not in self.__dict__ or key in ["sym2idx", "idx2sym"]:
                        self.__dict__[key] = value
            elif vocab_file is not None:
                self.build_vocab()

        except Exception as e:
            raise ValueError(
                f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
                "If you tried to load a model saved through TransfoXLTokenizerFast, "
                "please note they are not compatible."
            ) from e

        if vocab_file is not None:
            self.build_vocab()

        super().__init__(
            special=special,
            min_freq=min_freq,
            max_size=max_size,
            lower_case=lower_case,
            delimiter=delimiter,
            vocab_file=vocab_file,
            pretrained_vocab_file=pretrained_vocab_file,
            never_split=never_split,
            unk_token=unk_token,
            eos_token=eos_token,
            additional_special_tokens=additional_special_tokens,
            language=language,
            **kwargs,
        )

        # these are not required to initialize the parent class as only used when tokenizing.
        if never_split is None:
            never_split = self.all_special_tokens
        self.never_split = never_split

    @property
    def do_lower_case(self):
        return self.lower_case

    def _compile_space_around_punctuation_pattern(self):
        look_ahead_for_special_token = f"(?=[{self.punctuation_symbols}])"
        look_ahead_to_match_all_except_space = r"(?=[^\s])"
        return re.compile(r"" + look_ahead_for_special_token + look_ahead_to_match_all_except_space)

    def count_file(self, path, verbose=False, add_eos=False):
        if verbose:
            logger.info(f"counting file {path} ...")
        assert os.path.exists(path), f"Input file {path} not found"

        sents = []
        with open(path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    logger.info(f"    line {idx}")
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    def count_sents(self, sents, verbose=False):
        """
        sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose:
            logger.info(f"counting {len(sents)} sents ...")
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                logger.info(f"    line {idx}")
            self.counter.update(symbols)

    def _build_from_file(self, vocab_file):
        self.idx2sym = []
        self.sym2idx = OrderedDict()

        with open(vocab_file, "r", encoding="utf-8") as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        if "<UNK>" in self.sym2idx:
            self.unk_idx = self.sym2idx["<UNK>"]
        elif "<unk>" in self.sym2idx:
            self.unk_idx = self.sym2idx["<unk>"]
        else:
            raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory,
                (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["pretrained_vocab_file"],
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "wb") as f:
            pickle.dump(self.__dict__, f)
        return (vocab_file,)

    def build_vocab(self):
        if self.vocab_file:
            logger.info(f"building vocab from {self.vocab_file}")
            self._build_from_file(self.vocab_file)
            logger.info(f"Final vocab size {len(self.sym2idx)}")
        else:
            logger.info(f"building vocab with min_freq={self.min_freq}, max_size={self.max_size}")
            self.idx2sym = []
            self.sym2idx = OrderedDict()

            for sym in self.special:
                self.add_special(sym)

            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq:
                    break
                self.add_symbol(sym)

            logger.info(f"Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens")

    @torch_only_method
    def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
        if verbose:
            logger.info(f"encoding file {path} ...")
        assert os.path.exists(path), f"Input file {path} not found"
        encoded = []
        with open(path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    logger.info(f"    line {idx}")
                symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    @torch_only_method
    def encode_sents(self, sents, ordered=False, verbose=False):
        if verbose:
            logger.info(f"encoding {len(sents)} sents ...")
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                logger.info(f"    line {idx}")
            encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def add_special(self, sym):
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym])

    def add_symbol(self, sym):
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1

    def move_added_token(self, token: str, target_idx: int):
        """
        Moves an added token to a specific position in the vocab. This method should be used when resizing an
        embedding layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer
        from the default position (at the very end) to the desired one.

        Args:
            token: The token to move to a specific position in the vocab.
            target_idx: The position where the token should be moved to.
        """
        assert token in self.added_tokens_encoder, "Token which should be moved has to be an added token"
        assert token not in self.idx2sym, "Token which should be moved is already in vocab"

        # Insert sym into vocab
        self.idx2sym.insert(target_idx, token)
        self.sym2idx[token] = target_idx

        # Shift following indices in sym2idx
        for idx in range(target_idx + 1, len(self.idx2sym)):
            current_sym = self.idx2sym[idx]
            self.sym2idx[current_sym] = idx

        # Delete token from added_tokens
        old_index = self._added_tokens_encoder.pop(token)
        self._added_tokens_decoder.pop(old_index)

    def moses_punct_norm(self, text):
        return self.moses_punct_normalizer.normalize(text)

    def moses_tokenize(self, text):
        return self.moses_tokenizer.tokenize(
            text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split
        )

    def moses_pipeline(self, text: str) -> list[str]:
        """
        Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with
        *aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large
        comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" ->
        "23 @,@ 000 people are 1 @.@ 80m tall"

        Args:
            text: Text to be tokenized

        Returns:
            A list of tokenized strings

        Example:

        ```python
        >>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
        >>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall")
        ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']
        ```"""
        text = self.moses_punct_norm(text)
        text = self.moses_tokenize(text)
        text = tokenize_numbers(text)
        return text

    def _convert_id_to_token(self, idx):
        """Converts an id to a token using the vocab."""
        assert 0 <= idx < len(self), f"Index {idx} out of vocabulary range"
        return self.idx2sym[idx]

    def _convert_token_to_id(self, sym):
        """Converts a token (str) to an id using the vocab."""
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            # logger.info(f'encounter unk {sym}')
            # assert '<eos>' not in sym
            if hasattr(self, "unk_idx"):
                return self.sym2idx.get(sym, self.unk_idx)
            # Backward compatibility with pre-trained models
            elif "<unk>" in self.sym2idx:
                return self.sym2idx["<unk>"]
            elif "<UNK>" in self.sym2idx:
                return self.sym2idx["<UNK>"]
            else:
                raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (string) into a single string. Additionally, the split numbers are converted
        back into their original form.
        """
        out_string = self.moses_detokenizer.detokenize(tokens)
        return detokenize_numbers(out_string).strip()

    @torch_only_method
    def convert_to_tensor(self, symbols):
        return torch.LongTensor(self.convert_tokens_to_ids(symbols))

    @property
    def vocab_size(self):
        return len(self.idx2sym)

    def get_vocab(self):
        vocab = self.sym2idx.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, line, add_eos=False, add_double_eos=False):
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()

        # empty delimiter '' will evaluate False
        if self.delimiter == "":
            symbols = line
        else:
            symbols = self.moses_pipeline(line)

        if add_double_eos:  # lm1b
            return ["<S>"] + symbols + ["<S>"]
        elif add_eos:
            return symbols + ["<eos>"]
        else:
            return symbols


class LMOrderedIterator:
    def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
        """
        data -- LongTensor -- the LongTensor is strictly ordered
        """
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0

        self.device = device

        # Work out how cleanly we can divide the dataset into bsz parts.
        self.n_step = data.size(0) // bsz

        # Trim off any extra elements that wouldn't cleanly fit (remainders).
        data = data.narrow(0, 0, self.n_step * bsz)

        # Evenly divide the data across the bsz batches.
        self.data = data.view(bsz, -1).t().contiguous().to(device)

        # Number of mini-batches
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt

    def get_batch(self, i, bptt=None):
        if bptt is None:
            bptt = self.bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)

        end_idx = i + seq_len
        beg_idx = max(0, i - self.ext_len)

        data = self.data[beg_idx:end_idx]
        target = self.data[i + 1 : i + 1 + seq_len]

        data_out = data.transpose(0, 1).contiguous().to(self.device)
        target_out = target.transpose(0, 1).contiguous().to(self.device)

        return data_out, target_out, seq_len

    def get_fixlen_iter(self, start=0):
        for i in range(start, self.data.size(0) - 1, self.bptt):
            yield self.get_batch(i)

    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
        max_len = self.bptt + max_deviation * std
        i = start
        while True:
            bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
            bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
            data, target, seq_len = self.get_batch(i, bptt)
            i += seq_len
            yield data, target, seq_len
            if i >= self.data.size(0) - 2:
                break

    def __iter__(self):
        return self.get_fixlen_iter()


class LMShuffledIterator:
    def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
        """
        data -- list[LongTensor] -- there is no order among the LongTensors
        """
        self.data = data

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self):
        # index iterator
        epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data)))

        # sentence iterator
        for idx in epoch_indices:
            yield self.data[idx]

    @torch_only_method
    def stream_iterator(self, sent_stream):
        # streams for each data in the batch
        streams = [None] * self.bsz

        data = torch.LongTensor(self.bptt, self.bsz)
        target = torch.LongTensor(self.bptt, self.bsz)

        n_retain = 0

        while True:
            # data   : [n_retain+bptt x bsz]
            # target : [bptt x bsz]
            data[n_retain:].fill_(-1)
            target.fill_(-1)

            valid_batch = True

            for i in range(self.bsz):
                n_filled = 0
                try:
                    while n_filled < self.bptt:
                        if streams[i] is None or len(streams[i]) <= 1:
                            streams[i] = next(sent_stream)
                        # number of new tokens to fill in
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
                        # first n_retain tokens are retained from last batch
                        data[n_retain + n_filled : n_retain + n_filled + n_new, i] = streams[i][:n_new]
                        target[n_filled : n_filled + n_new, i] = streams[i][1 : n_new + 1]
                        streams[i] = streams[i][n_new:]
                        n_filled += n_new
                except StopIteration:
                    valid_batch = False
                    break

            if not valid_batch:
                return

            data_out = data.transpose(0, 1).contiguous().to(self.device)
            target_out = target.transpose(0, 1).contiguous().to(self.device)

            yield data_out, target_out, self.bptt

            n_retain = min(data.size(0), self.ext_len)
            if n_retain > 0:
                data[:n_retain] = data[-n_retain:]
            data.resize_(n_retain + self.bptt, data.size(1))

    def __iter__(self):
        # sent_stream is an iterator
        sent_stream = self.get_sent_stream()

        for batch in self.stream_iterator(sent_stream):
            yield batch


class LMMultiFileIterator(LMShuffledIterator):
    def __init__(self, paths, vocab, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
        self.paths = paths
        self.vocab = vocab

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self, path):
        sents = self.vocab.encode_file(path, add_double_eos=True)
        if self.shuffle:
            np.random.shuffle(sents)
        sent_stream = iter(sents)

        return sent_stream

    def __iter__(self):
        if self.shuffle:
            np.random.shuffle(self.paths)

        for path in self.paths:
            # sent_stream is an iterator
            sent_stream = self.get_sent_stream(path)
            for batch in self.stream_iterator(sent_stream):
                yield batch


class TransfoXLCorpus:
    @classmethod
    @torch_only_method
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a pre-processed corpus.
        """
        vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        # redirect to the cache, if necessary
        try:
            resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir)
        except OSError:
            logger.error(
                f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list"
                f" ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}). We assumed '{pretrained_model_name_or_path}'"
                f" was a path or url but couldn't find files {CORPUS_NAME} at this path or url."
            )
            return None
        if is_local:
            logger.info(f"loading corpus file {resolved_corpus_file}")
        else:
            logger.info(f"loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}")

        # Instantiate tokenizer.
        corpus = cls(*inputs, **kwargs)
        check_torch_load_is_safe()
        corpus_dict = torch.load(resolved_corpus_file, weights_only=True)
        for key, value in corpus_dict.items():
            corpus.__dict__[key] = value
        corpus.vocab = vocab
        if corpus.train is not None:
            corpus.train = torch.tensor(corpus.train, dtype=torch.long)
        if corpus.valid is not None:
            corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
        if corpus.test is not None:
            corpus.test = torch.tensor(corpus.test, dtype=torch.long)
        return corpus

    def __init__(self, *args, **kwargs):
        self.vocab = TransfoXLTokenizer(*args, **kwargs)
        self.dataset = None
        self.train = None
        self.valid = None
        self.test = None

    def build_corpus(self, path, dataset):
        self.dataset = dataset

        if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
            self.vocab.count_file(os.path.join(path, "train.txt"))
            self.vocab.count_file(os.path.join(path, "valid.txt"))
            self.vocab.count_file(os.path.join(path, "test.txt"))
        elif self.dataset == "wt103":
            self.vocab.count_file(os.path.join(path, "train.txt"))
        elif self.dataset == "lm1b":
            train_path_pattern = os.path.join(
                path,
                "1-billion-word-language-modeling-benchmark-r13output",
                "training-monolingual.tokenized.shuffled",
                "news.en-*",
            )
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        if self.dataset in ["ptb", "wt2", "wt103"]:
            self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
        elif self.dataset in ["enwik8", "text8"]:
            self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
        elif self.dataset == "lm1b":
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        if split == "train":
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == "lm1b":
                kwargs["shuffle"] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        elif split in ["valid", "test"]:
            data = self.valid if split == "valid" else self.test
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == "lm1b":
                data_iter = LMShuffledIterator(data, *args, **kwargs)
        else:
            data_iter = None
            raise ValueError(f"Split not recognized: {split}")

        return data_iter


@torch_only_method
def get_lm_corpus(datadir, dataset):
    fn = os.path.join(datadir, "cache.pt")
    fn_pickle = os.path.join(datadir, "cache.pkl")
    if os.path.exists(fn):
        logger.info("Loading cached dataset...")
        check_torch_load_is_safe()
        corpus = torch.load(fn, weights_only=True)
    elif os.path.exists(fn_pickle):
        logger.info("Loading cached dataset from pickle...")
        if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
            raise ValueError(
                "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
                "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
                "that could have been tampered with. If you already verified the pickle data and decided to use it, "
                "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
            )
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
    else:
        logger.info(f"Producing dataset {dataset}...")
        kwargs = {}
        if dataset in ["wt103", "wt2"]:
            kwargs["special"] = ["<eos>"]
            kwargs["lower_case"] = False
        elif dataset == "ptb":
            kwargs["special"] = ["<eos>"]
            kwargs["lower_case"] = True
        elif dataset == "lm1b":
            kwargs["special"] = []
            kwargs["lower_case"] = False
            kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
        elif dataset in ["enwik8", "text8"]:
            pass

        corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
        torch.save(corpus, fn)

    return corpus


__all__ = ["TransfoXLCorpus", "TransfoXLTokenizer"]
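As a quick orientation for the tokenizer above, here is a minimal usage sketch. It assumes the deprecated `transfo-xl/transfo-xl-wt103` checkpoint is still downloadable and that your transformers version still exposes `TransfoXLTokenizer` at the top level (in newer versions it lives under `transformers.models.deprecated.transfo_xl`). Because loading the pretrained vocabulary goes through `pickle.load`, it requires the `TRUST_REMOTE_CODE` opt-in enforced in `__init__` above.

```python
import os

# Opt in to the pickle-based vocab loading guarded in __init__ above.
os.environ["TRUST_REMOTE_CODE"] = "True"

from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")

# Word-level tokenization with number splitting (see moses_pipeline above).
tokens = tokenizer.tokenize("23,000 people are 1.80 m tall")
# ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']

ids = tokenizer.convert_tokens_to_ids(tokens)

# detokenize_numbers restores the original surface form on the way back.
print(tokenizer.convert_tokens_to_string(tokens))  # 23,000 people are 1.80 m tall
```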
File: transformers/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert DINOv2 with Registers checkpoints from the original repository.

URL: https://github.com/facebookresearch/dinov2/tree/main
"""

import argparse
import json
from pathlib import Path

import requests
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import (
    BitImageProcessor,
    Dinov2WithRegistersConfig,
    Dinov2WithRegistersForImageClassification,
    Dinov2WithRegistersModel,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dinov2_with_registers_config(model_name, image_classifier=False):
    config = Dinov2WithRegistersConfig(image_size=518, patch_size=14)

    # size of the architecture
    if "vits" in model_name:
        config.hidden_size = 384
        config.num_attention_heads = 6
    elif "vitb" in model_name:
        pass
    elif "vitl" in model_name:
        config.hidden_size = 1024
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "vitg" in model_name:
        config.use_swiglu_ffn = True
        config.hidden_size = 1536
        config.num_hidden_layers = 40
        config.num_attention_heads = 24
    else:
        raise ValueError("Model not supported")

    if image_classifier:
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        config.num_labels = 1000
        config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        config.id2label = {int(k): v for k, v in config.id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # patch embedding layer
    rename_keys.append(("cls_token", "embeddings.cls_token"))
    rename_keys.append(("mask_token", "embeddings.mask_token"))
    rename_keys.append(("pos_embed", "embeddings.position_embeddings"))
    rename_keys.append(("register_tokens", "embeddings.register_tokens"))
    rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias"))

    for i in range(config.num_hidden_layers):
        # layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias"))
        # MLP
        if config.use_swiglu_ffn:
            rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight"))
            rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias"))
            rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight"))
            rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias"))
        else:
            rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight"))
            rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias"))
            rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight"))
            rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias"))
        # layerscale
        rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1"))
        rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1"))
        # attention projection layer
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias"))

    # final layernorm
    rename_keys.append(("norm.weight", "layernorm.weight"))
    rename_keys.append(("norm.bias", "layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


@torch.no_grad()
def convert_dinov2_with_registers_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our Dinov2WithRegisters structure.
    """

    # define default Dinov2WithRegisters configuration
    image_classifier = "1layer" in model_name
    config = get_dinov2_with_registers_config(model_name, image_classifier=image_classifier)

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", ""))
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)

    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if "w12" in key:
            key = key.replace("w12", "weights_in")
        if "w3" in key:
            key = key.replace("w3", "weights_out")
        state_dict[key] = val

    # load HuggingFace model
    if image_classifier:
        model = Dinov2WithRegistersForImageClassification(config).eval()
        model.dinov2_with_registers.load_state_dict(state_dict)
        model_name_to_classifier_dict_url = {
            "dinov2_vits14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_linear_head.pth",
            "dinov2_vitb14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_linear_head.pth",
            "dinov2_vitl14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_linear_head.pth",
            "dinov2_vitg14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_linear_head.pth",
        }
        url = model_name_to_classifier_dict_url[model_name]
        classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
        model.classifier.weight = nn.Parameter(classifier_state_dict["weight"])
        model.classifier.bias = nn.Parameter(classifier_state_dict["bias"])
    else:
        model = Dinov2WithRegistersModel(config).eval()
        model.load_state_dict(state_dict)

    # load image
    image = prepare_img()

    # preprocess image
    transformations = transforms.Compose(
        [
            transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=IMAGENET_DEFAULT_MEAN,  # these are RGB mean+std values
                std=IMAGENET_DEFAULT_STD,  # across a large photo dataset.
            ),
        ]
    )

    original_pixel_values = transformations(image).unsqueeze(0)  # insert batch dimension

    processor = BitImageProcessor(
        size={"shortest_edge": 256},
        resample=PILImageResampling.BICUBIC,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    pixel_values = processor(image, return_tensors="pt").pixel_values

    assert torch.allclose(original_pixel_values, pixel_values)

    with torch.no_grad():
        outputs = model(pixel_values, output_hidden_states=True)
        original_outputs = original_model(pixel_values)

    # assert values
    if image_classifier:
        print("Predicted class:")
        class_idx = outputs.logits.argmax(-1).item()
        print(model.config.id2label[class_idx])
    else:
        assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape
        assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3)
        print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_name_to_hf_name = {
            "dinov2_vits14_reg": "dinov2-with-registers-small",
            "dinov2_vitb14_reg": "dinov2-with-registers-base",
            "dinov2_vitl14_reg": "dinov2-with-registers-large",
            "dinov2_vitg14_reg": "dinov2-with-registers-giant",
            "dinov2_vits14_reg_1layer": "dinov2-with-registers-small-imagenet1k-1-layer",
            "dinov2_vitb14_reg_1layer": "dinov2-with-registers-base-imagenet1k-1-layer",
            "dinov2_vitl14_reg_1layer": "dinov2-with-registers-large-imagenet1k-1-layer",
            "dinov2_vitg14_reg_1layer": "dinov2-with-registers-giant-imagenet1k-1-layer",
        }

        name = model_name_to_hf_name[model_name]
        model.push_to_hub(f"nielsr/{name}")
        processor.push_to_hub(f"nielsr/{name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dinov2_vits14_reg",
        type=str,
        choices=[
            "dinov2_vits14_reg",
            "dinov2_vitb14_reg",
            "dinov2_vitl14_reg",
            "dinov2_vitg14_reg",
            "dinov2_vits14_reg_1layer",
            "dinov2_vitb14_reg_1layer",
            "dinov2_vitl14_reg_1layer",
            "dinov2_vitg14_reg_1layer",
        ],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_dinov2_with_registers_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
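For reference, a hedged sketch of how the conversion entry point above might be invoked programmatically instead of through the CLI. The module name follows the file path recorded below; the output directory is hypothetical, and the call assumes torch hub can reach `facebookresearch/dinov2`.

```python
# Programmatic equivalent of:
#   python convert_dinov2_with_registers_to_hf.py --model_name dinov2_vits14_reg \
#       --pytorch_dump_folder_path ./dinov2-with-registers-small
from convert_dinov2_with_registers_to_hf import convert_dinov2_with_registers_checkpoint

convert_dinov2_with_registers_checkpoint(
    model_name="dinov2_vits14_reg",
    pytorch_dump_folder_path="./dinov2-with-registers-small",  # hypothetical output dir
    push_to_hub=False,
)
```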
File: transformers/src/transformers/models/dinov2_with_registers/convert_dinov2_with_registers_to_hf.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Callable, Optional

import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax

from ...modeling_flax_outputs import (
    FlaxBaseModelOutput,
    FlaxMaskedLMOutput,
    FlaxMultipleChoiceModelOutput,
    FlaxQuestionAnsweringModelOutput,
    FlaxSequenceClassifierOutput,
    FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_distilbert import DistilBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
_CONFIG_FOR_DOC = "DistilBertConfig"

FLAX_DISTILBERT_START_DOCSTRING = r"""
    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading, saving and converting weights from PyTorch models)

    This model is also a
    [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it
    as a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
    behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

DISTILBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`numpy.ndarray` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * angle_rates


def positional_encoding(position, d_model):
    # create the sinusoidal pattern for the positional encoding
    angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)

    # apply sin to even indices in the array; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])

    # apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])

    pos_encoding = angle_rads[np.newaxis, ...]

    return jnp.array(pos_encoding)


class FlaxEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.word_embeddings = nn.Embed(
            self.config.vocab_size,
            self.config.dim,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        if not self.config.sinusoidal_pos_embds:
            self.position_embeddings = nn.Embed(
                self.config.max_position_embeddings,
                self.config.dim,
                embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            )
        else:
            self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
        self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.dropout)

    def __call__(self, input_ids, deterministic: bool = True):
        # Embed
        batch_size, seq_length = input_ids.shape
        inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
        if not self.config.sinusoidal_pos_embds:
            position_ids = jnp.arange(seq_length).astype("i4")
            position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
            position_embeds = self.position_embeddings(position_ids.astype("i4"))
        else:
            position_embeds = self.pos_encoding[:, :seq_length, :]
            # explicitly cast the positions here, since self.embed_positions are not registered as parameters
            position_embeds = position_embeds.astype(inputs_embeds.dtype)

        # Sum all embeddings
        hidden_states = inputs_embeds + position_embeds

        # Layer Norm
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states


class FlaxMultiHeadSelfAttention(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.n_heads = self.config.n_heads
        self.dim = self.config.dim
        self.dropout = nn.Dropout(rate=self.config.attention_dropout)

        if not (self.dim % self.n_heads == 0):
            raise ValueError(f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}")

        self.q_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.k_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.v_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.out_lin = nn.Dense(
            self.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )

    def __call__(
        self,
        query,
        key,
        value,
        mask,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        bs, q_len, dim = query.shape
        k_len = key.shape[1]
        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
        # assert key.size() == value.size()

        dim_per_head = self.dim // self.n_heads

        mask_reshp = (bs, 1, 1, k_len)

        def shape(x):
            """separate heads"""
            return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)

        def unshape(x):
            """group heads"""
            return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_len, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_len, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_len, dim_per_head)

        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_len, dim_per_head)
        scores = jnp.matmul(q, k.transpose(0, 1, 3, 2))  # (bs, n_heads, q_len, k_len)
        mask = jnp.reshape(mask, mask_reshp)

        mask = mask.astype(scores.dtype)
        scores = scores - 1e30 * (1.0 - mask)

        weights = nn.softmax(scores, axis=-1)  # (bs, n_heads, q_len, k_len)
        weights = self.dropout(weights, deterministic=deterministic)

        context = jnp.matmul(weights, v)  # (bs, n_heads, q_len, dim_per_head)
        context = unshape(context)  # (bs, q_len, dim)
        context = self.out_lin(context)  # (bs, q_len, dim)

        if output_attentions:
            return (context, weights)
        else:
            return (context,)


class FlaxFFN(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dropout = nn.Dropout(rate=self.config.dropout)
        self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.lin1 = nn.Dense(
            self.config.hidden_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.lin2 = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )

        self.activation = ACT2FN[self.config.activation]

    def __call__(self, hidden_states, deterministic: bool = True):
        hidden_states = self.lin1(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.lin2(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states


class FlaxTransformerBlock(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        assert self.config.dim % self.config.n_heads == 0, (
            f"Hidden size {self.config.dim} not divisible by number of heads {self.config.n_heads}"
        )

        self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
        self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)

        self.ffn = FlaxFFN(self.config, dtype=self.dtype)
        self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attn_mask,
        output_attentions: bool = False,
        deterministic: bool = True,
    ):
        # Self-Attention
        sa_output = self.attention(
            query=hidden_states,
            key=hidden_states,
            value=hidden_states,
            mask=attn_mask,
            output_attentions=output_attentions,
            deterministic=deterministic,
        )
        if output_attentions:
            sa_output, sa_weights = sa_output
        else:
            assert type(sa_output) is tuple
            sa_output = sa_output[0]
        sa_output = self.sa_layer_norm(sa_output + hidden_states)

        # Feed Forward Network
        ffn_output = self.ffn(sa_output, deterministic=deterministic)
        ffn_output = self.output_layer_norm(ffn_output + sa_output)
        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output


class FlaxTransformer(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        deterministic: bool = True,
        return_dict: bool = False,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        for layer_module in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states=hidden_states,
                attn_mask=attention_mask,
                output_attentions=output_attentions,
                deterministic=deterministic,
            )
            hidden_states = layer_outputs[-1]

            if output_attentions:
                assert len(layer_outputs) == 2
                attentions = layer_outputs[0]
                all_attentions = all_attentions + (attentions,)
            else:
                assert len(layer_outputs) == 1

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )


class FlaxTransformerEncoder(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layer = FlaxTransformer(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        deterministic: bool = True,
        return_dict: bool = False,
    ):
        return self.layer(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            deterministic=deterministic,
            return_dict=return_dict,
        )


class FlaxDistilBertLMDecoder(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros

    def setup(self):
        self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))

    def __call__(self, inputs, kernel):
        inputs = jnp.asarray(inputs, self.dtype)
        kernel = jnp.asarray(kernel, self.dtype)
        y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
        bias = jnp.asarray(self.bias, self.dtype)
        y = y + bias
        return y


class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DistilBertConfig
    base_model_prefix = "distilbert"
    module_class: nn.Module = None

    def __init__(
        self,
        config: DistilBertConfig,
        input_shape: tuple = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        attention_mask = jnp.ones_like(input_ids)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        head_mask=None,
        params: Optional[dict] = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )


class FlaxDistilBertModule(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
        self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        input_embeds = self.embeddings(input_ids, deterministic=deterministic)
        return self.transformer(
            hidden_states=input_embeds,
            attention_mask=attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


@add_start_docstrings(
    "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
    module_class = FlaxDistilBertModule


append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)


class FlaxDistilBertForMaskedLMModule(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
        self.vocab_transform = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
        if self.config.tie_word_embeddings:
            self.vocab_projector = FlaxDistilBertLMDecoder(
                self.config,
                dtype=self.dtype,
            )
        else:
            self.vocab_projector = nn.Dense(
                self.config.vocab_size,
                dtype=self.dtype,
                kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            )

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        dlbrt_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            deterministic=deterministic,
            return_dict=return_dict,
        )
        hidden_states = dlbrt_output[0]
        prediction_logits = self.vocab_transform(hidden_states)
        prediction_logits = ACT2FN[self.config.activation](prediction_logits)
        prediction_logits = self.vocab_layer_norm(prediction_logits)

        if self.config.tie_word_embeddings:
            shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
            prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
        else:
            prediction_logits = self.vocab_projector(prediction_logits)

        if not return_dict:
            output = (prediction_logits,) + dlbrt_output[1:]
            return output

        return FlaxMaskedLMOutput(
            logits=prediction_logits,
            hidden_states=dlbrt_output.hidden_states,
            attentions=dlbrt_output.attentions,
        )


@add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
    module_class = FlaxDistilBertForMaskedLMModule


append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)


class FlaxDistilBertForSequenceClassificationModule(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
        self.pre_classifier = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
        self.classifier = nn.Dense(
            self.config.num_labels,
            dtype=self.dtype,
        )

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Model
        distilbert_output = self.distilbert(
            input_ids,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = ACT2FN["relu"](pooled_output)
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
        logits = self.classifier(pooled_output)  # (bs, num_labels)

        if not return_dict:
            return (logits,) + distilbert_output[1:]

        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )


@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
    module_class = FlaxDistilBertForSequenceClassificationModule


append_call_sample_docstring(
    FlaxDistilBertForSequenceClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)


class FlaxDistilBertForMultipleChoiceModule(nn.Module):
    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
        self.pre_classifier = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
        self.classifier = nn.Dense(
            1,
            dtype=self.dtype,
        )

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1]
        input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
        attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None

        # Model
        outputs = self.distilbert(
            input_ids,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_state = outputs[0]
        pooled_output = hidden_state[:, 0]
        pooled_output = self.pre_classifier(pooled_output)
        pooled_output = ACT2FN["relu"](pooled_output)
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
        logits = self.classifier(pooled_output)

        reshaped_logits = logits.reshape(-1, num_choices)

        if not return_dict:
            return (reshaped_logits,) + outputs[2:]

        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output
    and a softmax) e.g. for RocStories/SWAG tasks.
""", FLAX_DISTILBERT_START_DOCSTRING, ) class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForMultipleChoiceModule overwrite_call_docstring( FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) append_call_sample_docstring( FlaxDistilBertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC, ) class FlaxDistilBertForTokenClassificationModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Model outputs = self.distilbert( input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, FLAX_DISTILBERT_START_DOCSTRING, ) class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForTokenClassificationModule append_call_sample_docstring( FlaxDistilBertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC, ) class FlaxDistilBertForQuestionAnsweringModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) assert self.config.num_labels == 2 self.dropout = nn.Dropout(rate=self.config.qa_dropout) def __call__( self, input_ids, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Model distilbert_output = self.distilbert( input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = distilbert_output[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.qa_outputs(hidden_states) start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + distilbert_output[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, ) @add_start_docstrings( """ DistilBert Model with a span classification head on top for extractive question-answering tasks like 
    SQuAD (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
    module_class = FlaxDistilBertForQuestionAnsweringModule


append_call_sample_docstring(
    FlaxDistilBertForQuestionAnswering,
    _CHECKPOINT_FOR_DOC,
    FlaxQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)


__all__ = [
    "FlaxDistilBertForMaskedLM",
    "FlaxDistilBertForMultipleChoice",
    "FlaxDistilBertForQuestionAnswering",
    "FlaxDistilBertForSequenceClassification",
    "FlaxDistilBertForTokenClassification",
    "FlaxDistilBertModel",
    "FlaxDistilBertPreTrainedModel",
]
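# --- Usage sketch (editor's addition, not part of the upstream module) --------
# A minimal end-to-end call through FlaxDistilBertModel, assuming the public
# "distilbert-base-uncased" checkpoint and a working jax/flax install. Guarded
# so that importing this module stays side-effect free.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

    inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
    outputs = model(**inputs)
    # Hidden states come back as (batch_size, sequence_length, config.dim).
    print(outputs.last_hidden_state.shape)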
transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py/0
{ "file_path": "transformers/src/transformers/models/distilbert/modeling_flax_distilbert.py", "repo_id": "transformers", "token_count": 14514 }
475
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for Donut.""" from typing import Optional, Union from ...image_processing_utils_fast import BaseImageProcessorFast, BatchFeature, DefaultFastImageProcessorKwargs from ...image_transforms import group_images_by_shape, reorder_images from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ImageInput, PILImageResampling, SizeDict from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F class DonutFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): """ Args: do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random amount of padding on each size, up to the largest image size in the batch. Otherwise, all images are padded to the largest image size in the batch. """ do_thumbnail: Optional[bool] do_align_long_axis: Optional[bool] do_pad: Optional[bool] @auto_docstring class DonutImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"height": 2560, "width": 1920} do_resize = True do_rescale = True do_normalize = True do_thumbnail = True do_align_long_axis = False do_pad = True valid_kwargs = DonutFastImageProcessorKwargs def __init__(self, **kwargs: Unpack[DonutFastImageProcessorKwargs]): size = kwargs.pop("size", None) if isinstance(size, (tuple, list)): size = size[::-1] kwargs["size"] = size super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[DonutFastImageProcessorKwargs]) -> BatchFeature: if "size" in kwargs: size = kwargs.pop("size") if isinstance(size, (tuple, list)): size = size[::-1] kwargs["size"] = size return super().preprocess(images, **kwargs) def align_long_axis( self, image: "torch.Tensor", size: SizeDict, ) -> "torch.Tensor": """ Align the long axis of the image to the longest axis of the specified size. Args: image (`torch.Tensor`): The image to be aligned. size (`dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. Returns: `torch.Tensor`: The aligned image. 
""" input_height, input_width = image.shape[-2:] output_height, output_width = size.height, size.width if (output_width < output_height and input_width > input_height) or ( output_width > output_height and input_width < input_height ): height_dim, width_dim = image.dim() - 2, image.dim() - 1 image = torch.rot90(image, 3, dims=[height_dim, width_dim]) return image def pad_image( self, image: "torch.Tensor", size: SizeDict, random_padding: bool = False, ) -> "torch.Tensor": """ Pad the image to the specified size. Args: image (`torch.Tensor`): The image to be padded. size (`dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. random_padding (`bool`, *optional*, defaults to `False`): Whether to use random padding or not. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ output_height, output_width = size.height, size.width input_height, input_width = image.shape[-2:] delta_width = output_width - input_width delta_height = output_height - input_height if random_padding: pad_top = torch.random.randint(low=0, high=delta_height + 1) pad_left = torch.random.randint(low=0, high=delta_width + 1) else: pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = (pad_left, pad_top, pad_right, pad_bottom) return F.pad(image, padding) def pad(self, *args, **kwargs): logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.") return self.pad_image(*args, **kwargs) def thumbnail( self, image: "torch.Tensor", size: SizeDict, ) -> "torch.Tensor": """ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`torch.Tensor`): The image to be resized. size (`dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use. data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ input_height, input_width = image.shape[-2:] output_height, output_width = size.height, size.width # We always resize to the smallest of either the input or output size. 
height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) return self.resize( image, size=SizeDict(width=width, height=height), interpolation=F.InterpolationMode.BICUBIC, ) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, do_thumbnail: bool, do_align_long_axis: bool, do_pad: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_align_long_axis: stacked_images = self.align_long_axis(image=stacked_images, size=size) if do_resize: shortest_edge = min(size.height, size.width) stacked_images = self.resize( image=stacked_images, size=SizeDict(shortest_edge=shortest_edge), interpolation=interpolation ) if do_thumbnail: stacked_images = self.thumbnail(image=stacked_images, size=size) if do_pad: stacked_images = self.pad_image(image=stacked_images, size=size, random_padding=False) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["DonutImageProcessorFast"]
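# --- Usage sketch (editor's addition, not part of the upstream module) --------
# A minimal preprocessing round trip with DonutImageProcessorFast, assuming
# torch is installed. The random uint8 tensor stands in for a document scan;
# with the defaults above, the image is resized, thumbnailed, and padded to
# (height=2560, width=1920).
if __name__ == "__main__":
    import torch

    processor = DonutImageProcessorFast()
    fake_scan = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
    batch = processor(images=fake_scan, return_tensors="pt")
    print(batch["pixel_values"].shape)  # torch.Size([1, 3, 2560, 1920])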
transformers/src/transformers/models/donut/image_processing_donut_fast.py/0
{ "file_path": "transformers/src/transformers/models/donut/image_processing_donut_fast.py", "repo_id": "transformers", "token_count": 4442 }
476
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DINOv2 + DPT checkpoints from the original repository. URL: https://github.com/facebookresearch/dinov2/tree/main""" import argparse import itertools import math from pathlib import Path import requests import torch from PIL import Image from torchvision import transforms from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(model_name): if "small" in model_name: # equivalent to stage 3, stage 6, stage 9, stage 12 backbone_config = Dinov2Config.from_pretrained( "facebook/dinov2-small", out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False ) neck_hidden_sizes = [48, 96, 192, 384] elif "base" in model_name: backbone_config = Dinov2Config.from_pretrained( "facebook/dinov2-base", out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False ) neck_hidden_sizes = [96, 192, 384, 768] elif "large" in model_name: backbone_config = Dinov2Config.from_pretrained( "facebook/dinov2-large", out_indices=[5, 12, 18, 24], apply_layernorm=False, reshape_hidden_states=False ) neck_hidden_sizes = [128, 256, 512, 1024] elif "giant" in model_name: backbone_config = Dinov2Config.from_pretrained( "facebook/dinov2-giant", out_indices=[10, 20, 30, 40], apply_layernorm=False, reshape_hidden_states=False ) neck_hidden_sizes = [192, 384, 768, 1536] else: raise NotImplementedError("To do") config = DPTConfig( backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes, use_bias_in_fusion_residual=False, add_projection=True, ) return config # here we list all DPT keys to be renamed (original name on the left, our name on the right) def create_rename_keys_dpt(config): rename_keys = [] # fmt: off # activation postprocessing (projections, readout projections + resize blocks) for i in range(4): rename_keys.append((f"decode_head.reassemble_blocks.projects.{i}.conv.weight", f"neck.reassemble_stage.layers.{i}.projection.weight")) rename_keys.append((f"decode_head.reassemble_blocks.projects.{i}.conv.bias", f"neck.reassemble_stage.layers.{i}.projection.bias")) rename_keys.append((f"decode_head.reassemble_blocks.readout_projects.{i}.0.weight", f"neck.reassemble_stage.readout_projects.{i}.0.weight")) rename_keys.append((f"decode_head.reassemble_blocks.readout_projects.{i}.0.bias", f"neck.reassemble_stage.readout_projects.{i}.0.bias")) if i != 2: rename_keys.append((f"decode_head.reassemble_blocks.resize_layers.{i}.weight", f"neck.reassemble_stage.layers.{i}.resize.weight")) rename_keys.append((f"decode_head.reassemble_blocks.resize_layers.{i}.bias", f"neck.reassemble_stage.layers.{i}.resize.bias")) # fusion layers for i in range(4): rename_keys.append((f"decode_head.fusion_blocks.{i}.project.conv.weight", f"neck.fusion_stage.layers.{i}.projection.weight")) 
rename_keys.append((f"decode_head.fusion_blocks.{i}.project.conv.bias", f"neck.fusion_stage.layers.{i}.projection.bias")) if i != 0: rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit1.conv1.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer1.convolution1.weight")) rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit1.conv2.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer1.convolution2.weight")) rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit2.conv1.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer2.convolution1.weight")) rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit2.conv2.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer2.convolution2.weight")) # neck convolutions for i in range(4): rename_keys.append((f"decode_head.convs.{i}.conv.weight", f"neck.convs.{i}.weight")) # head rename_keys.append(("decode_head.project.conv.weight", "head.projection.weight")) rename_keys.append(("decode_head.project.conv.bias", "head.projection.bias")) for i in range(0, 5, 2): rename_keys.append((f"decode_head.conv_depth.head.{i}.weight", f"head.head.{i}.weight")) rename_keys.append((f"decode_head.conv_depth.head.{i}.bias", f"head.head.{i}.bias")) # fmt: on return rename_keys # here we list all backbone keys to be renamed (original name on the left, our name on the right) def create_rename_keys_backbone(config): rename_keys = [] # fmt: off # patch embedding layer rename_keys.append(("cls_token", "backbone.embeddings.cls_token")) rename_keys.append(("mask_token", "backbone.embeddings.mask_token")) rename_keys.append(("pos_embed", "backbone.embeddings.position_embeddings")) rename_keys.append(("patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias")) # Transformer encoder for i in range(config.backbone_config.num_hidden_layers): # layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.norm1.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.norm1.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.norm2.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.norm2.bias")) # MLP if config.backbone_config.use_swiglu_ffn: rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"backbone.encoder.layer.{i}.mlp.w12.weight")) rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"backbone.encoder.layer.{i}.mlp.w12.bias")) rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"backbone.encoder.layer.{i}.mlp.w3.weight")) rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"backbone.encoder.layer.{i}.mlp.w3.bias")) else: rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.mlp.fc1.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.mlp.fc1.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.mlp.fc2.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.mlp.fc2.bias")) # layerscale rename_keys.append((f"blocks.{i}.ls1.gamma", f"backbone.encoder.layer.{i}.layer_scale1.lambda1")) rename_keys.append((f"blocks.{i}.ls2.gamma", f"backbone.encoder.layer.{i}.layer_scale2.lambda1")) # attention projection layer rename_keys.append((f"blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight")) 
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias")) # fmt: on rename_keys.append(("norm.weight", "backbone.layernorm.weight")) rename_keys.append(("norm.bias", "backbone.layernorm.bias")) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): for i in range(config.backbone_config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") hidden_size = config.backbone_config.hidden_size # next, add query, keys and values (in that order) to the state dict state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :] state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden_size] state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ hidden_size : hidden_size * 2, : ] state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ hidden_size : hidden_size * 2 ] state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :] state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden_size:] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "https://dl.fbaipublicfiles.com/dinov2/images/example.jpg" im = Image.open(requests.get(url, stream=True).raw) return im name_to_url = { "dpt-dinov2-small-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_dpt_head.pth", "dpt-dinov2-small-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_dpt_head.pth", "dpt-dinov2-base-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_dpt_head.pth", "dpt-dinov2-base-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_dpt_head.pth", "dpt-dinov2-large-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_dpt_head.pth", "dpt-dinov2-large-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_dpt_head.pth", "dpt-dinov2-giant-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_dpt_head.pth", "dpt-dinov2-giant-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_dpt_head.pth", } def get_original_pixel_values(image): class CenterPadding: def __init__(self, multiple): super().__init__() self.multiple = multiple def _get_pad(self, size): new_size = math.ceil(size / self.multiple) * self.multiple pad_size = new_size - size pad_size_left = pad_size // 2 pad_size_right = pad_size - pad_size_left return pad_size_left, pad_size_right def __call__(self, img): pads = list(itertools.chain.from_iterable(self._get_pad(m) for m in img.shape[-2:][::-1])) output = torch.nn.functional.pad(img, pads) return output def __repr__(self): return self.__class__.__name__ + "()" def make_depth_transform() -> transforms.Compose: return transforms.Compose( [ transforms.ToTensor(), lambda x: 255.0 * x[:3], # Discard alpha component and scale by 255 transforms.Normalize( mean=(123.675, 116.28, 103.53), std=(58.395, 57.12, 57.375), ), CenterPadding(multiple=14), ] ) transform = make_depth_transform() original_pixel_values = 
transform(image).unsqueeze(0) return original_pixel_values @torch.no_grad() def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits): """ Copy/paste/tweak model's weights to our DPT structure. """ # define DPT configuration based on URL checkpoint_url = name_to_url[model_name] config = get_dpt_config(model_name) # load original DPT state_dict from URL print("URL:", checkpoint_url) dpt_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"] # rename keys rename_keys = create_rename_keys_dpt(config) for src, dest in rename_keys: rename_key(dpt_state_dict, src, dest) # load original backbone state_dict from URL if "small" in model_name: original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14") elif "base" in model_name: original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitb14") elif "large" in model_name: original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitl14") elif "giant" in model_name: original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitg14") else: raise NotImplementedError("To do") original_model.eval() backbone_state_dict = original_model.state_dict() # rename keys rename_keys = create_rename_keys_backbone(config) for src, dest in rename_keys: rename_key(backbone_state_dict, src, dest) # read in qkv matrices read_in_q_k_v(backbone_state_dict, config) for key, val in backbone_state_dict.copy().items(): val = backbone_state_dict.pop(key) if "w12" in key: key = key.replace("w12", "weights_in") if "w3" in key: key = key.replace("w3", "weights_out") backbone_state_dict[key] = val # merge state_dicts state_dict = {**backbone_state_dict, **dpt_state_dict} # load HuggingFace model model = DPTForDepthEstimation(config) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) assert missing_keys == [ "neck.fusion_stage.layers.0.residual_layer1.convolution1.weight", "neck.fusion_stage.layers.0.residual_layer1.convolution2.weight", ] model.eval() # Verify image processor processor = DPTImageProcessor( do_resize=False, do_rescale=False, do_pad=True, size_divisor=14, do_normalize=True, image_mean=(123.675, 116.28, 103.53), image_std=(58.395, 57.12, 57.375), ) image = prepare_img() pixel_values = processor(image, return_tensors="pt").pixel_values.float() original_pixel_values = get_original_pixel_values(image) assert torch.allclose(pixel_values, original_pixel_values) # Verify forward pass with torch.no_grad(): outputs = model(pixel_values) predicted_depth = outputs.predicted_depth print("Shape of predicted depth:", predicted_depth.shape) print("First values of predicted depth:", predicted_depth[0, :3, :3]) # assert logits if verify_logits: if model_name == "dpt-dinov2-small-nyu": expected_shape = torch.Size([1, 576, 736]) expected_slice = torch.tensor( [[3.3576, 3.4741, 3.4345], [3.4324, 3.5012, 3.2775], [3.2560, 3.3563, 3.2354]] ) assert predicted_depth.shape == torch.Size(expected_shape) assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-5) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing model and processor to hub...") model.push_to_hub(repo_id=f"facebook/{model_name}") 
        processor.push_to_hub(repo_id=f"facebook/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dpt-dinov2-small-nyu",
        type=str,
        choices=name_to_url.keys(),
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub after conversion.",
    )
    parser.add_argument(
        "--verify_logits",
        action="store_true",
        help="Whether to verify the predicted depth against known expected values after conversion.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits)
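# --- Example invocation (editor's addition) ------------------------------------
# Assuming network access to the DINOv2 release URLs in `name_to_url` and to the
# torch.hub DINOv2 backbones; the script name below is just the path of this
# file in your checkout:
#
#   python convert_dinov2_depth_to_hf.py \
#       --model_name dpt-dinov2-small-nyu \
#       --pytorch_dump_folder_path ./dpt-dinov2-small-nyu \
#       --verify_logits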
transformers/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py", "repo_id": "transformers", "token_count": 7344 }
477
# coding=utf-8
# Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet model configuration"""

from collections import OrderedDict
from collections.abc import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EfficientNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EfficientNetModel`]. It is used to instantiate
    an EfficientNet model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the EfficientNet
    [google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 600):
            The input image size.
        width_coefficient (`float`, *optional*, defaults to 2.0):
            Scaling coefficient for network width at each stage.
        depth_coefficient (`float`, *optional*, defaults to 3.1):
            Scaling coefficient for network depth at each stage.
        depth_divisor (`int`, *optional*, defaults to 8):
            A unit of network width.
        kernel_sizes (`list[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
            List of kernel sizes to be used in each block.
        in_channels (`list[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
            List of input channel sizes to be used in each block for convolutional layers.
        out_channels (`list[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
            List of output channel sizes to be used in each block for convolutional layers.
        depthwise_padding (`list[int]`, *optional*, defaults to `[]`):
            List of block indices with square padding.
        strides (`list[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
            List of stride sizes to be used in each block for convolutional layers.
        num_block_repeats (`list[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
            List of the number of times each block is to be repeated.
        expand_ratios (`list[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
            List of expansion ratios for each block.
        squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
            Squeeze expansion ratio.
        hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported.
        hidden_dim (`int`, *optional*, defaults to 2560):
            The hidden dimension of the layer before the classification head.
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`): Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`, `"max"`] initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. batch_norm_eps (`float`, *optional*, defaults to 1e-3): The epsilon used by the batch normalization layers. batch_norm_momentum (`float`, *optional*, defaults to 0.99): The momentum used by the batch normalization layers. dropout_rate (`float`, *optional*, defaults to 0.5): The dropout rate to be applied before final classifier layer. drop_connect_rate (`float`, *optional*, defaults to 0.2): The drop rate for skip connections. Example: ```python >>> from transformers import EfficientNetConfig, EfficientNetModel >>> # Initializing a EfficientNet efficientnet-b7 style configuration >>> configuration = EfficientNetConfig() >>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration >>> model = EfficientNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "efficientnet" def __init__( self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: list[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: list[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: list[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: list[int] = [], strides: list[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: list[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: list[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.image_size = image_size self.width_coefficient = width_coefficient self.depth_coefficient = depth_coefficient self.depth_divisor = depth_divisor self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.depthwise_padding = depthwise_padding self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.squeeze_expansion_ratio = squeeze_expansion_ratio self.hidden_act = hidden_act self.hidden_dim = hidden_dim self.pooling_type = pooling_type self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.batch_norm_momentum = batch_norm_momentum self.dropout_rate = dropout_rate self.drop_connect_rate = drop_connect_rate self.num_hidden_layers = sum(num_block_repeats) * 4 class EfficientNetOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 __all__ = ["EfficientNetConfig", "EfficientNetOnnxConfig"]
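# --- Usage sketch (editor's addition, not part of the upstream module) --------
# Shows how the scaling coefficients map onto a smaller EfficientNet variant.
# The b0-style values below are illustrative, not an official preset.
if __name__ == "__main__":
    b0_like = EfficientNetConfig(
        image_size=224,
        width_coefficient=1.0,
        depth_coefficient=1.0,
        hidden_dim=1280,
    )
    # num_hidden_layers is derived as sum(num_block_repeats) * 4.
    print(b0_like.num_hidden_layers)  # (1 + 2 + 2 + 3 + 3 + 4 + 1) * 4 == 64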
transformers/src/transformers/models/efficientnet/configuration_efficientnet.py/0
{ "file_path": "transformers/src/transformers/models/efficientnet/configuration_efficientnet.py", "repo_id": "transformers", "token_count": 2971 }
478
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Iterable from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_valid_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): from PIL import Image logger = logging.get_logger(__name__) def make_batched_images(images) -> list[list[ImageInput]]: """ Accepts images in list or nested list format, and makes a list of images for preprocessing. Args: images (`Union[list[list[ImageInput]], list[ImageInput], ImageInput]`): The input image. Returns: list: A list of images. """ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]): return [img for img_list in images for img in img_list] elif isinstance(images, (list, tuple)) and is_valid_image(images[0]): return images elif is_valid_image(images): return [images] raise ValueError(f"Could not make batched images from {images}") def smart_resize( height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280 ): """Rescales the image so that the following conditions are met: 1. Both dimensions (height and width) are divisible by 'factor'. 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. 3. The aspect ratio of the image is maintained as closely as possible. """ if max(height, width) / min(height, width) > 200: raise ValueError( f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}" ) h_bar = round(height / factor) * factor w_bar = round(width / factor) * factor if h_bar * w_bar > max_pixels: beta = math.sqrt((height * width) / max_pixels) h_bar = max(factor, math.floor(height / beta / factor) * factor) w_bar = max(factor, math.floor(width / beta / factor) * factor) elif h_bar * w_bar < min_pixels: beta = math.sqrt(min_pixels / (height * width)) h_bar = math.ceil(height * beta / factor) * factor w_bar = math.ceil(width * beta / factor) * factor return h_bar, w_bar class Emu3ImageProcessor(BaseImageProcessor): r""" Constructs a Emu3 image processor that dynamically resizes images based on the original images. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. 
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats for each channel in the image. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. min_pixels (`int`, *optional*, defaults to `512 * 512`): The min pixels of the image to resize the image. max_pixels (`int`, *optional*, defaults to `1024 * 1024`): The max pixels of the image to resize the image. spatial_factor (`int`, *optional*, defaults to 8): The spatial downsample factor the image will be downsampled in feature extracting phase """ model_input_names = ["pixel_values", "image_sizes"] def __init__( self, do_resize: bool = True, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, do_pad: bool = True, min_pixels: int = 512 * 512, max_pixels: int = 1024 * 1024, spatial_factor: int = 8, **kwargs, ) -> None: super().__init__(**kwargs) self.do_resize = do_resize self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.min_pixels = min_pixels self.max_pixels = max_pixels self.spatial_factor = spatial_factor self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels} self.do_convert_rgb = do_convert_rgb def _preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`. vision_info (`list[Dict]`, *optional*): Optional list of dictionaries containing additional information about vision inputs. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums. 
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number
                of channels in the image.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding
                to the number of channels in the image.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_list_of_images(images)

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        height, width = get_image_size(images[0], channel_dim=input_data_format)
        resized_height, resized_width = height, width
        processed_images = []
        for image in images:
            if do_resize:
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=self.spatial_factor,
                    min_pixels=self.min_pixels,
                    max_pixels=self.max_pixels,
                )
                image = resize(
                    image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
                )

            if do_rescale:
                image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            processed_images.append(image)

        images = np.array(processed_images)
        return images

    def _pad_for_batching(
        self,
        pixel_values: list[np.ndarray],
        image_sizes: list[list[int]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches.
Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`) image_sizes (`list[list[int]]`): A list of sizes for each image in `pixel_values` in (height, width) format. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. """ max_shape = ( max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]), ) pixel_values = [ pad( image, padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])), data_format=data_format, input_data_format=input_data_format, ) for image, size in zip(pixel_values, image_sizes) ] return pixel_values def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, do_pad: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. 
do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_pad = do_pad if do_pad is not None else self.do_pad if images is not None: images = make_batched_images(images) if images is not None and not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) validate_preprocess_arguments( rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) pixel_values = [] for image in images: image = self._preprocess( image, do_resize=do_resize, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, ) pixel_values.extend(image) image_sizes = [image.shape[-2:] for image in pixel_values] if do_pad: pixel_values = self._pad_for_batching(pixel_values, image_sizes) pixel_values = np.array(pixel_values) return BatchFeature( data={"pixel_values": pixel_values, "image_sizes": image_sizes}, tensor_type=return_tensors ) def postprocess( self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Union[str, TensorType] = "PIL.Image.Image", input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Postprocess an image or batch of images tensor. Postprocess is the reverse process of preprocess. The parameters should be same as in preprocess. Args: images (`ImageInput`): Image to postprocess. Expects a single or batch of images with pixel values ranging from -1 to 1. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if isinstance(images[0], Image.Image): return images if len(images) > 1 else images[0] if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) pixel_values = [] for image in images: image = to_numpy_array(image) if do_normalize: image = self.unnormalize( image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format ) if do_rescale: image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) image = image.clip(0, 255).astype(np.uint8) if do_normalize and do_rescale and return_tensors == "PIL.Image.Image": image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format) pixel_values.append(Image.fromarray(image)) else: pixel_values.extend(image) data = {"pixel_values": pixel_values} return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None return BatchFeature(data=data, tensor_type=return_tensors) def unnormalize( self, image: np.array, image_mean: Union[float, Iterable[float]], image_std: Union[float, Iterable[float]], input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.array: """ Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image * image_std) + image_mean Args: image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`): Batch of pixel values to postprocess. image_mean (`float` or `Iterable[float]`): The mean to use for unnormalization. image_std (`float` or `Iterable[float]`): The standard deviation to use for unnormalization. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ num_channels = 3 if isinstance(image_mean, Iterable): if len(image_mean) != num_channels: raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}") else: image_mean = [image_mean] * num_channels if isinstance(image_std, Iterable): if len(image_std) != num_channels: raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(image_std)}") else: image_std = [image_std] * num_channels rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std)) rev_image_std = tuple(1 / std for std in image_std) image = self.normalize( image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format ) return image __all__ = ["Emu3ImageProcessor"]
transformers/src/transformers/models/emu3/image_processing_emu3.py/0
{ "file_path": "transformers/src/transformers/models/emu3/image_processing_emu3.py", "repo_id": "transformers", "token_count": 11938 }
479
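As a quick orientation to the `Emu3ImageProcessor` record above, here is a minimal round-trip sketch of its `preprocess`/`postprocess` pair. This is illustrative only: the checkpoint id is a plausible converted Emu3 repo rather than something stated in the file, and the default flags (`do_normalize`, `do_rescale`) are assumed.

```python
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("BAAI/Emu3-Chat-hf")  # illustrative checkpoint id

image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

# preprocess: resize / rescale / normalize, pad patches across the batch,
# and report per-image (height, width) sizes alongside pixel_values
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape, inputs["image_sizes"])

# postprocess inverts the pipeline: unnormalize, rescale back to [0, 255],
# and (by default) return PIL images
restored = processor.postprocess(inputs["pixel_values"])["pixel_values"]
print(type(restored[0]))  # PIL.Image.Image
```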
# coding=utf-8 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import json import os import re from typing import Optional import torch from accelerate import init_empty_weights from huggingface_hub import snapshot_download from transformers import EomtConfig, EomtForUniversalSegmentation, EomtImageProcessorFast # fmt: off MAPPINGS = { # Embeddings r"network.encoder.backbone.cls_token" : r"embeddings.cls_token", r"network.encoder.backbone.reg_token" : r"embeddings.register_tokens", r"network.encoder.backbone.pos_embed" : r"embeddings.position_embeddings.weight", r"network.encoder.backbone.patch_embed.proj" : r"embeddings.patch_embeddings.projection", # Encoder Block r"network.encoder.backbone.blocks.(\d+).norm1" : r"layers.\1.norm1", r"network.encoder.backbone.blocks.(\d+).attn.proj" : r"layers.\1.attention.out_proj", r"network.encoder.backbone.blocks.(\d+).ls1.gamma" : r"layers.\1.layer_scale1.lambda1", r"network.encoder.backbone.blocks.(\d+).norm2" : r"layers.\1.norm2", r"network.encoder.backbone.blocks.(\d+).ls2.gamma" : r"layers.\1.layer_scale2.lambda1", r"network.encoder.backbone.blocks.(\d+).attn" : r"layers.\1.attention", # Others r"network.q.weight" : r"query.weight", r"network.class_head" : r"class_predictor", r"network.upscale.(\d+).conv1" : r"upscale_block.block.\1.conv1", r"network.upscale.(\d+).conv2" : r"upscale_block.block.\1.conv2", r"network.upscale.(\d+).norm" : r"upscale_block.block.\1.layernorm2d", r"network.mask_head.0" : r"mask_head.fc1", r"network.mask_head.2" : r"mask_head.fc2", r"network.mask_head.4" : r"mask_head.fc3", r"network.encoder.backbone.norm" : r"layernorm", r"network.attn_mask_probs" : r"attn_mask_probs", } # fmt: on # Mappings for MLP layers, depending on the type of MLP used in ckpts. 
MLP_MAPPINGS = { "swiglu_ffn": { r"network.encoder.backbone.blocks.(\d+).mlp.fc1": r"layers.\1.mlp.weights_in", r"network.encoder.backbone.blocks.(\d+).mlp.fc2": r"layers.\1.mlp.weights_out", }, "vanilla_mlp": { r"network.encoder.backbone.blocks.(\d+).mlp": r"layers.\1.mlp", }, } def convert_old_keys_to_new_keys(state_dict): keys_as_text = "\n".join(state_dict.keys()) new_keys_as_text = keys_as_text for old, repl in MAPPINGS.items(): if repl is None: new_keys_as_text = re.sub(old, "", new_keys_as_text) else: new_keys_as_text = re.sub(old, repl, new_keys_as_text) output_dict = dict(zip(keys_as_text.split("\n"), new_keys_as_text.split("\n"))) return output_dict def split_qkv_tensor(key, tensor): """Splits a qkv tensor into separate q, k, v tensors and updates the key accordingly.""" new_keys = ["q_proj", "k_proj", "v_proj"] split_size = tensor.shape[0] // 3 split_tensors = torch.split(tensor, split_size, dim=0) return {key.replace("qkv", new_key): split_tensors[i] for i, new_key in enumerate(new_keys)} def convert_state_dict_to_hf(state_dict): """Convert state dict keys to HF format.""" conversion_dict = convert_old_keys_to_new_keys(state_dict) converted_state_dict = {} for old_key, new_key in conversion_dict.items(): if new_key: if "qkv" in new_key: # Detect merged attention keys and split them. qkv_split_dict = split_qkv_tensor(new_key, state_dict[old_key]) converted_state_dict.update(qkv_split_dict) else: converted_state_dict[new_key] = state_dict[old_key] for i in [ "network.encoder.pixel_mean", "network.encoder.pixel_std", ]: converted_state_dict.pop(i) # Embeddings will not have initial dimension pos_embed_key = "embeddings.position_embeddings.weight" converted_state_dict[pos_embed_key] = converted_state_dict[pos_embed_key].squeeze(0) return converted_state_dict def ensure_model_downloaded( repo_id: Optional[str] = None, revision: Optional[str] = None, local_dir: Optional[str] = None ) -> str: """ Ensures model files are downloaded locally, downloads them if not. Returns path to local files. Args: repo_id: The Hugging Face model repo ID (required if local_dir not provided) revision: Optional git revision to use local_dir: Optional local directory path where model files should be stored/found """ if local_dir is not None: if os.path.exists(local_dir): print(f"Using provided local directory: {local_dir}") else: # Create the local directory if it doesn't exist os.makedirs(local_dir, exist_ok=True) print(f"Created local directory: {local_dir}") if repo_id is None: raise ValueError("Either repo_id or local_dir must be provided") print(f"Ensuring {repo_id} (revision: {revision or 'latest'}) is downloaded...") try: # First try to find files locally download_dir = snapshot_download(repo_id, revision=revision, local_files_only=True, local_dir=local_dir) print(f"Found model files locally at {download_dir}") return download_dir except Exception: # If files not found locally, download them print(f"Downloading model files for {repo_id}...") download_dir = snapshot_download(repo_id, revision=revision, local_files_only=False, local_dir=local_dir) print(f"Downloaded model files to {download_dir}") return download_dir def load_model_state_dict(input_path: str) -> dict: """ Load model state dict, handling both single and sharded files. 
""" index_path = os.path.join(input_path, "pytorch_model.bin.index.json") single_file_path = os.path.join(input_path, "pytorch_model.bin") # Check if we have a sharded model if os.path.exists(index_path): print("Loading sharded model...") state_dict = {} with open(index_path, "r") as f: index = json.load(f) # Get unique shard files and load each one only once unique_shard_files = sorted(set(index["weight_map"].values())) for shard_file in unique_shard_files: print(f"Loading shard {shard_file}...") shard_path = os.path.join(input_path, shard_file) shard_dict = torch.load(shard_path, map_location="cpu") state_dict.update(shard_dict) return state_dict # Single file model elif os.path.exists(single_file_path): print("Loading single file model...") return torch.load(single_file_path, map_location="cpu") else: raise ValueError(f"No model files found in {input_path}") def convert_model( repo_id=None, local_dir=None, output_dir=None, output_hub_path=None, safe_serialization=True, revision=None, ): """Convert and save the model weights, processor, and configuration.""" if output_dir is None and output_hub_path is None: raise ValueError("At least one of output_dir or output_hub_path must be specified") if repo_id is None and local_dir is None: raise ValueError("Either repo_id or local_dir must be specified") # Create output directory if specified if output_dir: os.makedirs(output_dir, exist_ok=True) print(f"Created/verified output directory: {output_dir}") torch.set_default_dtype(torch.float16) # Download or locate model files input_path = ensure_model_downloaded(repo_id=repo_id, revision=revision, local_dir=local_dir) with open(os.path.join(input_path, "config.json"), "r") as f: config_data = json.load(f) # Pop off unwanted keys _ = config_data.pop("backbone", None) config = EomtConfig( **{ **config_data, "layerscale_value": 1e-5, } ) if "semantic" in repo_id.split("_"): size = {"shortest_edge": config.image_size, "longest_edge": None} do_split_image = True do_pad = False else: size = {"shortest_edge": config.image_size, "longest_edge": config.image_size} do_split_image = False do_pad = True if "giant" in repo_id.split("_"): config.use_swiglu_ffn = True config.hidden_size = 1536 config.num_hidden_layers = 40 config.num_attention_heads = 24 # Update MAPPINGS for ckpts depending on the MLP type MAPPINGS.update(MLP_MAPPINGS["swiglu_ffn"]) else: MAPPINGS.update(MLP_MAPPINGS["vanilla_mlp"]) processor = EomtImageProcessorFast(size=size, do_split_image=do_split_image, do_pad=do_pad) # Save the config and processor if output_dir: config.save_pretrained(output_dir) processor.save_pretrained(output_dir) if output_hub_path: config.push_to_hub(output_hub_path) processor.push_to_hub(output_hub_path) # Initialize model with empty weights print("Creating empty model...") with init_empty_weights(): model = EomtForUniversalSegmentation(config) # Load and convert state dict print("Loading state dict...") state_dict = load_model_state_dict(input_path) state_dict = convert_state_dict_to_hf(state_dict) # Load converted state dict print("Loading converted weights into model...") model.load_state_dict(state_dict, strict=True, assign=True) # Save the model if output_dir: print(f"Saving model to {output_dir}...") model.save_pretrained(output_dir, safe_serialization=safe_serialization) if output_hub_path: print(f"Pushing model to hub at {output_hub_path}...") model.push_to_hub(output_hub_path, safe_serialization=safe_serialization) del state_dict, model gc.collect() # Validate the saved model if saved locally if output_dir: 
print("Reloading the local model to check if it's saved correctly...") EomtForUniversalSegmentation.from_pretrained(output_dir, device_map="auto") print("Local model reloaded successfully.") def main(): parser = argparse.ArgumentParser() parser.add_argument( "--hf_repo_id", help="HuggingFace Hub repo ID for the model", default=None, ) parser.add_argument( "--local_dir", help="Local directory containing the model files", default=None, ) parser.add_argument( "--revision", help="Specific revision to download from the Hub", default=None, ) parser.add_argument( "--output_dir", help="Location to write HF model locally", default=None, ) parser.add_argument( "--output_hub_path", help="Repository ID to push model to hub (e.g. 'username/model-name')", default=None, ) parser.add_argument( "--safe_serialization", action="store_true", help="Whether to save using safetensors", ) args = parser.parse_args() if args.output_dir is None and args.output_hub_path is None: raise ValueError("At least one of --output_dir or --output_hub_path must be specified") if args.hf_repo_id is None and args.local_dir is None: raise ValueError("Either --hf_repo_id or --local_dir must be specified") convert_model( repo_id=args.hf_repo_id, local_dir=args.local_dir, output_dir=args.output_dir, output_hub_path=args.output_hub_path, safe_serialization=args.safe_serialization, revision=args.revision, ) if __name__ == "__main__": main()
transformers/src/transformers/models/eomt/convert_eomt_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/eomt/convert_eomt_to_hf.py", "repo_id": "transformers", "token_count": 5332 }
480
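The key-renaming machinery in the EoMT converter above boils down to running every regex in `MAPPINGS` over a newline-joined dump of the checkpoint keys. A self-contained sketch of that trick, using a trimmed two-entry mapping rather than the full table:

```python
import re

mappings = {
    r"network.encoder.backbone.cls_token": r"embeddings.cls_token",
    r"network.encoder.backbone.blocks.(\d+).norm1": r"layers.\1.norm1",
}

keys = [
    "network.encoder.backbone.cls_token",
    "network.encoder.backbone.blocks.7.norm1.weight",
]

# Same idea as convert_old_keys_to_new_keys: one re.sub per pattern over the
# joined text, then zip old and new keys back into a renaming dict.
text = "\n".join(keys)
for old, repl in mappings.items():
    text = re.sub(old, repl, text)
print(dict(zip(keys, text.split("\n"))))
# {'network.encoder.backbone.cls_token': 'embeddings.cls_token',
#  'network.encoder.backbone.blocks.7.norm1.weight': 'layers.7.norm1.weight'}
```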
# Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Ernie 4.5 MoE model.""" from typing import Optional import torch import torch.nn.functional as F from torch import nn from ...cache_utils import Cache, DynamicCache from ...masking_utils import create_causal_mask from ...modeling_outputs import MoeModelOutputWithPast from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.generic import OutputRecorder, check_model_inputs from ..ernie4_5.modeling_ernie4_5 import Ernie4_5RotaryEmbedding, apply_rotary_pos_emb, rotate_half # noqa: F401 from ..llama.modeling_llama import LlamaAttention, LlamaRMSNorm from ..mixtral.modeling_mixtral import ( MixtralForCausalLM, MixtralPreTrainedModel, ) from ..qwen3_moe.modeling_qwen3_moe import Qwen3MoeDecoderLayer, Qwen3MoeMLP from .configuration_ernie4_5_moe import Ernie4_5_MoeConfig logger = logging.get_logger(__name__) class Ernie4_5_MoeRMSNorm(LlamaRMSNorm): pass class Ernie4_5_MoeMLP(Qwen3MoeMLP): def __init__(self, config, intermediate_size=None): super().__init__(config, intermediate_size) self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias) class Ernie4_5_MoeRotaryEmbedding(Ernie4_5RotaryEmbedding): def __init__(self, config: Ernie4_5_MoeConfig, device=None): super().__init__(config, device) class Ernie4_5_MoeAttention(LlamaAttention): def __init__(self, config: Ernie4_5_MoeConfig, layer_idx: int): super().__init__(config, layer_idx) self.attention_dropout = 0.0 self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias) class Ernie4_5_MoeStatics(nn.Module): """ Stores MoE (Mixture of Experts) statistics - Bias for the gating - Additionally, usage per expert in the original codebase """ def __init__(self, config): super().__init__() num_experts_groups = 1 num_experts = config.moe_num_experts self.e_score_correction_bias = nn.Parameter( torch.zeros(num_experts_groups, num_experts, dtype=torch.float32), requires_grad=False, ) def forward(self, hidden_states): # NOTE: This is a workaround to enable TP with a module that only has parameters # # Otherwise, it stays as `DTensor` when called in the "super" forward # 1. All other tensors are local (`torch.Tensor`) # 2. 
Isolate does not work on `nn.Module` which only has parameters return hidden_states + self.e_score_correction_bias.squeeze() class Ernie4_5_MoeSparseMoeBlock(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. Ernie 4.5 MoE's original formula is based on case (2) with (optional) shared experts and a corrections bias during gating. """ def __init__(self, config): super().__init__() self.num_experts = config.moe_num_experts self.top_k = config.moe_k # correction bias (yes it seems to be a typo with statics <> statistics) self.moe_statics = Ernie4_5_MoeStatics(config) # gating self.gate = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False, dtype=torch.float32) self.experts = nn.ModuleList( [Ernie4_5_MoeMLP(config, config.moe_intermediate_size) for _ in range(config.moe_num_experts)] ) self.norm_min = config.moe_norm_min # (optional) shared experts for all forwards self.shared_experts = None if config.moe_num_shared_experts > 0: self.shared_experts = Ernie4_5_MoeMLP(config, config.moe_intermediate_size * config.moe_num_shared_experts) def forward( self, hidden_states: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) # (Optional) shared experts if self.shared_experts is not None: shared_output = self.shared_experts(hidden_states) device_type = ( hidden_states.device.type if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps" else "cpu" ) with torch.autocast(device_type=device_type, enabled=False): # Force float32 # router_logits: (batch * sequence_length, n_experts) router_logits = self.gate(hidden_states.float()) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) _, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1) routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts) routing_weights = routing_weights / torch.clamp( routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min ) routing_weights = routing_weights.to(hidden_states.dtype) final_hidden_states = torch.zeros( (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device ) # One hot encode the selected experts to create an expert mask # this will be used to easily index which expert is going to be sollicitated expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) # Loop over all available experts in the model and perform the computation on each expert expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() for expert_idx in expert_hit: expert_layer = self.experts[expert_idx] idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0)) # Index the correct hidden states and compute the expert hidden state for # the current expert. 
We need to make sure to multiply the output hidden
                # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
                current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
                current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]

                # However `index_add_` only supports torch tensors for indexing so we'll use
                # the `top_x` tensor here.
                final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))

        # Add (optional) shared experts to the result
        if self.shared_experts is not None:
            final_hidden_states = final_hidden_states + shared_output

        final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return final_hidden_states, router_logits


class Ernie4_5_MoeDecoderLayer(Qwen3MoeDecoderLayer, nn.Module):
    def __init__(self, config, layer_idx):
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size
        self.self_attn = Ernie4_5_MoeAttention(config, layer_idx)

        if (
            ((layer_idx + 1) % config.moe_layer_interval == 0)
            and layer_idx >= config.moe_layer_start_index
            and layer_idx <= config.moe_layer_end_index
        ):
            self.mlp = Ernie4_5_MoeSparseMoeBlock(config)
        else:
            self.mlp = Ernie4_5_MoeMLP(config)

        self.input_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.post_attention_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)


@auto_docstring
class Ernie4_5_MoePreTrainedModel(MixtralPreTrainedModel):
    config: Ernie4_5_MoeConfig
    _no_split_modules = ["Ernie4_5_MoeDecoderLayer"]
    _keep_in_fp32_modules_strict = ["gate", "moe_statics"]
    # Not supporting multi-token prediction (MTP) atm
    _keys_to_ignore_on_load_unexpected = ["mtp"]
    _can_record_outputs = {
        "router_logits": OutputRecorder(Ernie4_5_MoeSparseMoeBlock, index=1),
        "hidden_states": Ernie4_5_MoeDecoderLayer,
        "attentions": Ernie4_5_MoeAttention,
    }

    def _init_weights(self, module):
        MixtralPreTrainedModel._init_weights(self, module)
        if isinstance(module, Ernie4_5_MoeStatics):
            module.e_score_correction_bias.data.zero_()


@auto_docstring
class Ernie4_5_MoeModel(Ernie4_5_MoePreTrainedModel):
    def __init__(self, config: Ernie4_5_MoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Ernie4_5_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Ernie4_5_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Ernie4_5_MoeRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class Ernie4_5_MoeForCausalLM(MixtralForCausalLM, Ernie4_5_MoePreTrainedModel):
    def __init__(self, config):
        Ernie4_5_MoePreTrainedModel.__init__(self, config)
        self.model = Ernie4_5_MoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=config.use_bias)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.moe_num_experts
        self.num_experts_per_tok = config.moe_k

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return super().forward(**super_kwargs)


__all__ = [
    "Ernie4_5_MoeForCausalLM",
    "Ernie4_5_MoeModel",
    "Ernie4_5_MoePreTrainedModel",
]
transformers/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py/0
{ "file_path": "transformers/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py", "repo_id": "transformers", "token_count": 6092 }
481
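One subtlety in `Ernie4_5_MoeSparseMoeBlock` above is that the correction bias from `Ernie4_5_MoeStatics` only influences which experts are *selected*; the mixing weights are gathered from the unbiased softmax and then renormalized. A standalone sketch of that gating arithmetic with toy shapes:

```python
import torch
import torch.nn.functional as F

num_tokens, num_experts, top_k, norm_min = 5, 8, 2, 1e-12
router_logits = torch.randn(num_tokens, num_experts)
correction_bias = torch.zeros(num_experts)  # stands in for e_score_correction_bias

routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
# the bias enters only the top-k selection, not the final weights
_, selected_experts = torch.topk(routing_weights + correction_bias, top_k, dim=-1)
weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
weights = weights / torch.clamp(weights.sum(dim=-1, keepdim=True), min=norm_min)
print(selected_experts.shape, weights.sum(dim=-1))  # (5, 2); every row sums to 1.0
```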
# coding=utf-8 # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for ESM.""" import os from typing import Optional from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} def load_vocab_file(vocab_file): with open(vocab_file, "r") as f: lines = f.read().splitlines() return [l.strip() for l in lines] class EsmTokenizer(PreTrainedTokenizer): """ Constructs an ESM tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ): self.all_tokens = load_vocab_file(vocab_file) self._id_to_token = dict(enumerate(self.all_tokens)) self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)} super().__init__( unk_token=unk_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, eos_token=eos_token, **kwargs, ) # TODO, all the tokens are added? But they are also part of the vocab... bit strange. # none of them are special, but they all need special splitting. self.unique_no_split_tokens = self.all_tokens self._update_trie(self.unique_no_split_tokens) def _convert_id_to_token(self, index: int) -> str: return self._id_to_token.get(index, self.unk_token) def _convert_token_to_id(self, token: str) -> int: return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) def _tokenize(self, text, **kwargs): return text.split() def get_vocab(self): base_vocab = self._token_to_id.copy() base_vocab.update(self.added_tokens_encoder) return base_vocab def token_to_id(self, token: str) -> int: return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) def id_to_token(self, index: int) -> str: return self._id_to_token.get(index, self.unk_token) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: cls = [self.cls_token_id] sep = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_1 is None: if self.eos_token_id is None: return cls + token_ids_0 else: return cls + token_ids_0 + sep elif self.eos_token_id is None: raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!") return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token def get_special_tokens_mask( self, token_ids_0: list, token_ids_1: Optional[list] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`list[int]`): List of ids of the first sequence. token_ids_1 (`list[int]`, *optional*): List of ids of the second sequence. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if token in self.all_special_ids else 0 for token in token_ids_0] mask = [1] + ([0] * len(token_ids_0)) + [1] if token_ids_1 is not None: mask += [0] * len(token_ids_1) + [1] return mask def save_vocabulary(self, save_directory, filename_prefix): vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt") with open(vocab_file, "w") as f: f.write("\n".join(self.all_tokens)) return (vocab_file,) @property def vocab_size(self) -> int: return len(self.all_tokens) __all__ = ["EsmTokenizer"]
transformers/src/transformers/models/esm/tokenization_esm.py/0
{ "file_path": "transformers/src/transformers/models/esm/tokenization_esm.py", "repo_id": "transformers", "token_count": 2326 }
482
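A short usage sketch for the `EsmTokenizer` above. `facebook/esm2_t6_8M_UR50D` is a public ESM-2 checkpoint, used here only as a convenient source of the vocab file; the point is that every vocab entry is registered as a no-split token, so the trie carves a raw amino-acid string into single residues even without whitespace.

```python
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")

enc = tokenizer("MKTAYIAKQR")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# ['<cls>', 'M', 'K', 'T', 'A', 'Y', 'I', 'A', 'K', 'Q', 'R', '<eos>']
```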
# coding=utf-8
# Copyright 2025 TII and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script converts Falcon-H1 checkpoints released in the original (Mamba-style) format into the
format provided in HuggingFace `transformers`. The source checkpoint is loaded through
`AutoModelForCausalLM` with `trust_remote_code=True`, so the original modeling code must be available."""

import argparse

import torch

from transformers import AutoModelForCausalLM, AutoTokenizer, FalconH1Config, FalconH1ForCausalLM


CONVERSION_MAPPING = {
    "backbone": "model",
    "embeddings": "embed_tokens",
    "mixer.": "",
    "mixer_ssm": "mamba",
    "mixer_attn": "self_attn",
    "mlp.": "feed_forward.",
    "mlp_norm": "pre_ff_layernorm",
    "ssm_proj": "mamba.in_proj",
    "attn_out_proj": "o_proj",
    ".norm.": ".input_layernorm.",
    ".mamba.input_layernorm.": ".mamba.norm.",
    ".ssm_out_proj.": ".mamba.out_proj.",
    "norm_f": "final_layernorm",
}


def convert_falcon_h1_to_hf(input_model_path, output_path):
    tokenizer = AutoTokenizer.from_pretrained(input_model_path)
    model = AutoModelForCausalLM.from_pretrained(input_model_path, dtype=torch.bfloat16, trust_remote_code=True)

    intermediate_size = int(model.config.expansion_factor * model.config.hidden_size)
    if intermediate_size % 2 != 0:
        intermediate_size = intermediate_size + (intermediate_size % 2)

    new_config = FalconH1Config(
        vocab_size=model.config.vocab_size,
        tie_word_embeddings=model.config.tie_word_embeddings,
        hidden_size=model.config.hidden_size,
        intermediate_size=intermediate_size,
        mamba_d_state=model.config.state_size,
        num_hidden_layers=model.config.num_hidden_layers,
        mamba_use_mlp=model.config.use_mlp,
        rms_norm_eps=model.config.layer_norm_epsilon,
        pad_token_id=model.config.pad_token_id,
        eos_token_id=model.config.eos_token_id,
        mamba_expand=model.config.expand,
        mamba_d_conv=model.config.conv_kernel,
        mamba_n_groups=model.config.n_groups,
        mamba_n_heads=model.config.num_heads,
        mamba_norm_before_gate=model.config.norm_before_gate,
        mamba_rms_norm=model.config.rms_norm,
        mamba_d_ssm=model.config.d_ssm,
        attention_bias=model.config.use_bias,
        projectors_bias=model.config.use_bias,
        mamba_conv_bias=model.config.use_conv_bias,
        hidden_act=model.config.hidden_act,
        use_cache=model.config.use_cache,
        mamba_chunk_size=model.config.chunk_size,
        num_attention_heads=model.config.num_heads_mha,
        num_key_value_heads=model.config.num_key_value_heads,
        head_dim=model.config.head_dim_mha,
        lm_head_multiplier=model.config.lm_head_multiplier,
        embedding_multiplier=model.config.embedding_multiplier,
        mlp_multipliers=model.config.mlp_multipliers,
        key_multiplier=model.config.key_multiplier,
        attention_out_multiplier=model.config.attention_out_multiplier,
        attention_in_multiplier=model.config.attention_in_multiplier,
        ssm_multipliers=model.config.ssm_multipliers,
        ssm_in_multiplier=model.config.ssm_in_multiplier,
        ssm_out_multiplier=model.config.ssm_out_multiplier,
        rope_theta=model.config.rope_theta,
    )

    old_state_dict = model.state_dict()
    new_state_dict = {}

    for old_key, old_value in old_state_dict.items():
        new_key = old_key
        for conversion_key, conversion_value in CONVERSION_MAPPING.items():
            if conversion_key in old_key:
                new_key = new_key.replace(conversion_key, conversion_value)

        if "mamba.input_layernorm" in new_key:
            new_key = new_key.replace("mamba.input_layernorm", "mamba.norm")

        # Special processing for attention layers
        if "self_attn.attn_proj" in new_key:
            num_heads = new_config.num_attention_heads
            num_kv_heads = new_config.num_key_value_heads
            head_dim = new_config.head_dim
            q_proj, k_proj, v_proj = old_value.split(
                [
                    num_heads * head_dim,
                    num_kv_heads * head_dim,
                    num_kv_heads * head_dim,
                ],
                dim=0,
            )
            new_state_dict[new_key.replace("attn_proj", "q_proj")] = q_proj
            new_state_dict[new_key.replace("attn_proj", "k_proj")] = k_proj
            new_state_dict[new_key.replace("attn_proj", "v_proj")] = v_proj
        else:
            new_state_dict[new_key] = old_value

    with torch.device("meta"):
        new_model = FalconH1ForCausalLM(new_config)

    del model

    new_model.load_state_dict(new_state_dict, strict=True, assign=True)

    new_model.save_pretrained(output_path)
    tokenizer.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        "--mamba_ssm_checkpoint_directory",
        type=str,
        required=True,
        help="Path to the original Falcon-H1 checkpoint directory (or Hub repo id) to be converted.",
    )
    parser.add_argument(
        "-o", "--output_dir", type=str, required=True, help="Path to directory to save the converted output model to."
    )
    args = parser.parse_args()

    convert_falcon_h1_to_hf(
        args.mamba_ssm_checkpoint_directory,
        args.output_dir,
    )
transformers/src/transformers/models/falcon_h1/convert_mamba_ssm_checkpoint.py/0
{ "file_path": "transformers/src/transformers/models/falcon_h1/convert_mamba_ssm_checkpoint.py", "repo_id": "transformers", "token_count": 2566 }
483
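The attention-specific step in the Falcon-H1 converter above slices one fused `attn_proj` weight into separate q/k/v projections using the head counts and `head_dim`. The same split in isolation, with toy dimensions:

```python
import torch

num_heads, num_kv_heads, head_dim, hidden_size = 4, 2, 8, 32
fused = torch.randn((num_heads + 2 * num_kv_heads) * head_dim, hidden_size)

# cut along dim 0: queries get num_heads blocks, keys and values num_kv_heads each
q_proj, k_proj, v_proj = fused.split(
    [num_heads * head_dim, num_kv_heads * head_dim, num_kv_heads * head_dim],
    dim=0,
)
print(q_proj.shape, k_proj.shape, v_proj.shape)
# torch.Size([32, 32]) torch.Size([16, 32]) torch.Size([16, 32])
```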
# coding=utf-8 # Copyright 2025 Microsoft and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import re from dataclasses import dataclass from typing import Any, Callable, Optional, Union import numpy as np from ...activations import ACT2FN from ...cache_utils import Cache from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import Seq2SeqLMOutput, Seq2SeqModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import MultiModalData, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...utils import ( TransformersKwargs, auto_docstring, can_return_tuple, is_torch_available, logging, ) from ..auto import CONFIG_MAPPING, AutoConfig from ..bart.modeling_bart import eager_attention_forward from ..beit.modeling_beit import BeitDropPath from ..llama4.modeling_llama4 import Llama4VisionMLP from ..llava.modeling_llava import LlavaForConditionalGeneration, LlavaModel, LlavaPreTrainedModel from ..llava.processing_llava import LlavaProcessorKwargs if is_torch_available(): import torch import torch.nn as nn import torch.nn.functional as F logger = logging.get_logger(__name__) class Florence2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence2VisionModel architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: in_channels (`int`, *optional*, defaults to 3): Number of input image channels. depths (`Tuple[int]`, *optional*, defaults to `(1, 1, 9, 1)`): The depth of the model. patch_size (`Tuple[int]`, *optional*, defaults to `(7, 3, 3, 3)`): The patch size of the image. patch_stride (`Tuple[int]`, *optional*, defaults to `(4, 2, 2, 2)`): The patch stride of the image. patch_padding (`Tuple[int]`, *optional*, defaults to `(3, 1, 1, 1)`): The patch padding of the image. patch_prenorm (`Tuple[bool]`, *optional*, defaults to `(False, True, True, True)`): Whether to apply layer normalization before the patch embedding layer. embed_dim (`Tuple[int]`, *optional*, defaults to `(128, 256, 512, 1024)`): The dimension of the embedding layer. num_heads (`Tuple[int]`, *optional*, defaults to `(4, 8, 16, 32)`): The number of attention heads. num_groups (`Tuple[int]`, *optional*, defaults to `(4, 8, 16, 32)`): The number of groups. window_size (`int`, *optional*, defaults to 12): The window size of the model. 
drop_path_rate (`float`, *optional*, defaults to 0.1): The dropout rate of the drop path layer. mlp_ratio (`int`, *optional*, defaults to 4.0): Ratio of mlp hidden dim to embedding dim. qkv_bias (`bool`, *optional*, defaults to `True`): If True, add a learnable bias to query, key, value. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. projection_dim (`int`, *optional*, defaults to 1024): The dimension of the projection layer. max_temporal_embeddings (`int`, *optional*, defaults to 100): The configuration of the visual temporal embedding. max_position_embeddings (`int`, *optional*, defaults to 50): The configuration of the image position embedding. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import Florence2VisionConfig, Florence2VisionModel >>> # Initializing a Florence2 Vision style configuration >>> configuration = Florence2VisionConfig() >>> # Initializing a model (with random weights) >>> model = Florence2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "florence_vision" def __init__( self, in_channels=3, depths=(1, 1, 9, 1), patch_size=(7, 3, 3, 3), patch_stride=(4, 2, 2, 2), patch_padding=(3, 1, 1, 1), patch_prenorm=(False, True, True, True), embed_dim=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32), num_groups=(4, 8, 16, 32), window_size=12, drop_path_rate=0.1, mlp_ratio=4.0, qkv_bias=True, activation_function="gelu", projection_dim=1024, max_temporal_embeddings=100, max_position_embeddings=50, initializer_range=0.02, **kwargs, ): self.in_channels = in_channels self.depths = list(depths) self.patch_size = list(patch_size) self.patch_stride = list(patch_stride) self.patch_padding = list(patch_padding) self.patch_prenorm = list(patch_prenorm) self.embed_dim = list(embed_dim) self.num_heads = list(num_heads) self.num_groups = list(num_groups) self.window_size = window_size self.drop_path_rate = drop_path_rate self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.projection_dim = projection_dim self.max_temporal_embeddings = max_temporal_embeddings self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.activation_function = activation_function super().__init__(**kwargs) class Florence2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate an Florence-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence-2 [microsoft/Florence-2-base](https://huggingface.co/microsoft/Florence-2-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`AutoConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Florence2VisionConfig`]. image_token_id (`int`, *optional*, defaults to 51289): The image token index to encode the image prompt. 
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.

    Example:

    ```python
    >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig

    >>> # Initializing a clip-like vision config
    >>> vision_config = CLIPVisionConfig()

    >>> # Initializing a Bart config
    >>> text_config = BartConfig()

    >>> # Initializing a Florence-2 configuration
    >>> configuration = Florence2Config(text_config=text_config, vision_config=vision_config)

    >>> # Initializing a model from the florence-2 configuration
    >>> model = Florence2ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2"
    sub_configs = {
        "text_config": AutoConfig,
        "vision_config": Florence2VisionConfig,
    }

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=51289,
        is_encoder_decoder=True,
        **kwargs,
    ):
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "bart")
            text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            text_config = CONFIG_MAPPING["bart"]()

        if isinstance(vision_config, dict):
            vision_config = Florence2VisionConfig(**vision_config)
        elif vision_config is None:
            logger.info("vision_config is None. Initializing the Florence2VisionConfig with default values.")
            vision_config = Florence2VisionConfig()

        self.text_config = text_config
        self.vision_config = vision_config
        self.image_token_id = image_token_id

        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class Florence2ProcessorKwargs(LlavaProcessorKwargs):
    pass


class Florence2Processor(ProcessorMixin):
    r"""
    Constructs a Florence2 processor which wraps a Florence2 image processor and a Florence2 tokenizer into a single
    processor.

    [`Florence2Processor`] offers all the functionalities of [`AutoImageProcessor`] and [`BartTokenizerFast`]. See the
    [`~Florence2Processor.__call__`] and [`~Florence2Processor.decode`] for more information.

    Args:
        image_processor (`AutoImageProcessor`, *optional*):
            The image processor is a required input.
        tokenizer (`Union[BartTokenizer, BartTokenizerFast]`, *optional*):
            The tokenizer is a required input.
        num_additional_image_tokens (`int`, *optional*, defaults to 0):
            Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or
            other extra tokens appended, no need to set this arg.
        post_processor_config (`dict`, *optional*):
            Task-specific parsing rules for [`Florence2PostProcessor`], e.g. regex patterns, thresholds, or banned
            tokens.
""" attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = ("BartTokenizer", "BartTokenizerFast") def __init__( self, image_processor=None, tokenizer=None, num_additional_image_tokens: int = 0, post_processor_config: Optional[dict] = None, **kwargs, ): self.tasks_answer_post_processing_type = { "<OCR>": "pure_text", "<OCR_WITH_REGION>": "ocr", "<CAPTION>": "pure_text", "<DETAILED_CAPTION>": "pure_text", "<MORE_DETAILED_CAPTION>": "pure_text", "<OD>": "description_with_bboxes", "<DENSE_REGION_CAPTION>": "description_with_bboxes", "<CAPTION_TO_PHRASE_GROUNDING>": "phrase_grounding", "<REFERRING_EXPRESSION_SEGMENTATION>": "polygons", "<REGION_TO_SEGMENTATION>": "polygons", "<OPEN_VOCABULARY_DETECTION>": "description_with_bboxes_or_polygons", "<REGION_TO_CATEGORY>": "pure_text", "<REGION_TO_DESCRIPTION>": "pure_text", "<REGION_TO_OCR>": "pure_text", "<REGION_PROPOSAL>": "bboxes", } self.task_prompts_without_inputs = { "<OCR>": "What is the text in the image?", "<OCR_WITH_REGION>": "What is the text in the image, with regions?", "<CAPTION>": "What does the image describe?", "<DETAILED_CAPTION>": "Describe in detail what is shown in the image.", "<MORE_DETAILED_CAPTION>": "Describe with a paragraph what is shown in the image.", "<OD>": "Locate the objects with category name in the image.", "<DENSE_REGION_CAPTION>": "Locate the objects in the image, with their descriptions.", "<REGION_PROPOSAL>": "Locate the region proposals in the image.", } self.task_prompts_with_input = { "<CAPTION_TO_PHRASE_GROUNDING>": "Locate the phrases in the caption: {input}", "<REFERRING_EXPRESSION_SEGMENTATION>": "Locate {input} in the image with mask", "<REGION_TO_SEGMENTATION>": "What is the polygon mask of region {input}", "<OPEN_VOCABULARY_DETECTION>": "Locate {input} in the image.", "<REGION_TO_CATEGORY>": "What is the region {input}?", "<REGION_TO_DESCRIPTION>": "What does the region {input} describe?", "<REGION_TO_OCR>": "What text is in the region {input}?", } self.num_image_tokens = image_processor.image_seq_length self.num_additional_image_tokens = num_additional_image_tokens self.post_processor_config = post_processor_config self.post_processor = Florence2PostProcessor(config=post_processor_config, tokenizer=tokenizer) self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id super().__init__(image_processor, tokenizer, **kwargs) def _construct_prompts(self, text: Union[str, list[str]]) -> list[str]: """ Construct prompts by replacing task tokens with corresponding prompt strings. """ if isinstance(text, str): text = [text] prompts = [] for prompt in text: # Check for tasks without inputs for task_token, task_prompt in self.task_prompts_without_inputs.items(): if task_token in prompt: if prompt != task_token: raise ValueError(f"Task token {task_token} should be the only content in the prompt.") prompt = task_prompt break # Check for tasks with inputs for task_token, task_prompt in self.task_prompts_with_input.items(): if task_token in prompt: input_text = prompt.replace(task_token, "").strip() prompt = task_prompt.format(input=input_text) break prompts.append(prompt) return prompts def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, **kwargs: Unpack[Florence2ProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). 
This method forwards the `text` and `kwargs` arguments to BartTokenizerFast's [`~BartTokenizerFast.__call__`] if
        `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and
        `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please
        refer to the docstring of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`,
                `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
""" if images is None and text is None: raise ValueError("You have to specify at least one of `images` or `text`.") output_kwargs = self._merge_kwargs( Florence2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) image_inputs = {} if images is not None: image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) if text is None: logger.warning_once("You are using Florence-2 without a text prefix.") text = [""] * (1 if not isinstance(images, list) else len(images)) elif isinstance(text, str): text = [text] if not isinstance(text, list) or not all(isinstance(token, str) for token in text): raise ValueError("`text` must be a string or list of strings.") if isinstance(images, list) and len(images) != len(text): raise ValueError(f"Number of images ({len(images)}) must match number of texts ({len(text)}).") prompt_strings = self._construct_prompts(text) # Add image tokens and special tokens if images are provided if image_inputs.get("pixel_values") is not None: # Replace the image token with the expanded image token sequence expanded_image_prompts = [] for sample in prompt_strings: sample = ( self.image_token * self.num_image_tokens + self.tokenizer.bos_token + sample + self.tokenizer.eos_token ) expanded_image_prompts.append(sample) prompt_strings = expanded_image_prompts # Construct and tokenize prompts output_kwargs["text_kwargs"].pop("add_special_tokens", None) return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False) text_inputs = self.tokenizer( prompt_strings, **output_kwargs["text_kwargs"], add_special_tokens=False, return_tensors=None ) self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"]) if return_mm_token_type_ids: array_ids = np.array(text_inputs["input_ids"]) mm_token_type_ids = np.zeros_like(text_inputs["input_ids"]) mm_token_type_ids[array_ids == self.image_token_id] = 1 text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist() return BatchFeature(data={**image_inputs, **text_inputs}, tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. 
""" vision_data = {} if image_sizes is not None: num_image_tokens = [self.image_seq_length] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) return MultiModalData(**vision_data) def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=False, **kwargs): """ Post-processes the output of `FuyuForConditionalGeneration` to only return the text output. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)` containing the token ids of the generated sequences. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. **kwargs: Additional arguments to be passed to the tokenizer's `batch_decode method`. Returns: `list[str]`: The decoded text output. """ return self.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs) def post_process_generation(self, text=None, sequence=None, task=None, image_size=None) -> dict[str, Any]: """ Post-process generation outputs based on the task. Args: text (`str`, *optional*): Generated text. sequence (`Union[List[int], torch.Tensor]`, *optional*): Generated token sequence. task (`str`, *optional*): The task for post-processing. image_size (`Tuple[int, int]`, *optional*): Image size for dequantization. Returns: `Dict[str, Any]`: Post-processed results keyed by task. """ if task is None: raise ValueError("`task` must be provided for post-processing.") post_proc_type = self.tasks_answer_post_processing_type.get(task, "pure_text") parsed = self.post_processor( text=text, sequence=sequence, image_size=image_size, parse_tasks=[post_proc_type], )[post_proc_type] if post_proc_type == "pure_text": final_answer = parsed.replace("<s>", "").replace("</s>", "").strip() elif post_proc_type in ["description_with_bboxes", "bboxes"]: bboxes = [inst["bbox"] for inst in parsed] labels = [inst["cat_name"] for inst in parsed] final_answer = {"bboxes": bboxes, "labels": labels} if parsed and "score" in parsed[0]: final_answer["scores"] = [inst["score"] for inst in parsed] elif post_proc_type == "ocr": quad_boxes = [inst["quad_box"] for inst in parsed] labels = [inst["text"] for inst in parsed] final_answer = {"quad_boxes": quad_boxes, "labels": labels} elif post_proc_type == "phrase_grounding": bboxes = [] labels = [] for inst in parsed: for bbox in inst["bbox"]: bboxes.append(bbox) labels.append(inst["cat_name"]) final_answer = {"bboxes": bboxes, "labels": labels} elif post_proc_type in ["description_with_polygons", "polygons"]: polygons = [inst["polygons"] for inst in parsed] labels = [inst["cat_name"] for inst in parsed] final_answer = {"polygons": polygons, "labels": labels} elif post_proc_type == "description_with_bboxes_or_polygons": bboxes = [] bboxes_labels = [] polygons = [] polygons_labels = [] for inst in parsed: label = inst["cat_name"] if "polygons" in inst: polygons.append(inst["polygons"]) polygons_labels.append(label) else: bboxes.append(inst["bbox"]) bboxes_labels.append(label) final_answer = { "bboxes": bboxes, "bboxes_labels": bboxes_labels, "polygons": polygons, "polygons_labels": polygons_labels, } else: raise ValueError(f"Unknown post-processing type: {post_proc_type}") return {task: final_answer} class Florence2PostProcessor: """ Post-processor for Florence-2 model outputs. 

    Parses generated text into structured results for various tasks like object detection, OCR, phrase grounding,
    etc.

    Args:
        config (`dict`):
            Task-parsing configuration mapping each parse task name to its settings (regex pattern, thresholds,
            banned grounding tokens, ...). May be `None`, in which case an empty configuration is used.
        tokenizer (`PreTrainedTokenizer`):
            The tokenizer used for decoding model outputs.
    """

    def __init__(self, config, tokenizer):
        self.tokenizer = tokenizer
        self.parse_task_config = config or {}
        self.banned_grounding_tokens = set(
            self.parse_task_config.get("phrase_grounding", {}).get("banned_grounding_tokens", [])
        )
        self.all_special_tokens = set(self.tokenizer.all_special_tokens)
        self.quantize_bins = (1000, 1000)

    def quantize(self, locations: "torch.Tensor", size: tuple[int, int]) -> "torch.Tensor":
        """
        Quantize continuous coordinates into discrete bins.

        Args:
            locations (`torch.Tensor`):
                Tensor of shape (N, 4) for boxes or (N, 2) for points/coordinates.
            size (`tuple[int, int]`):
                Original image size (width, height).

        Returns:
            `torch.Tensor`: Quantized locations as integers.
        """
        bins_w, bins_h = self.quantize_bins
        size_w, size_h = size
        per_bin_w = size_w / bins_w
        per_bin_h = size_h / bins_h

        if locations.shape[-1] == 4:
            # Bounding boxes: [xmin, ymin, xmax, ymax]
            xmin, ymin, xmax, ymax = locations.split(1, dim=-1)
            q_xmin = (xmin / per_bin_w).floor().clamp(0, bins_w - 1)
            q_ymin = (ymin / per_bin_h).floor().clamp(0, bins_h - 1)
            q_xmax = (xmax / per_bin_w).floor().clamp(0, bins_w - 1)
            q_ymax = (ymax / per_bin_h).floor().clamp(0, bins_h - 1)
            return torch.cat([q_xmin, q_ymin, q_xmax, q_ymax], dim=-1).int()
        elif locations.shape[-1] == 2:
            # Points/coordinates: [x, y]
            x, y = locations.split(1, dim=-1)
            q_x = (x / per_bin_w).floor().clamp(0, bins_w - 1)
            q_y = (y / per_bin_h).floor().clamp(0, bins_h - 1)
            return torch.cat([q_x, q_y], dim=-1).int()
        else:
            raise ValueError(f"Unsupported location shape: last dim must be 2 or 4, got {locations.shape[-1]}.")

    def dequantize(self, locations: "torch.Tensor", size: tuple[int, int]) -> "torch.Tensor":
        """
        Dequantize bin indices back to coordinates at the original image scale.

        Args:
            locations (`torch.Tensor`):
                Quantized tensor of shape (N, 4) for boxes or (N, 2) for points/coordinates.
            size (`tuple[int, int]`):
                Original image size (width, height).

        Returns:
            `torch.Tensor`: Dequantized locations, cast to integer pixel coordinates.
        """
        bins_w, bins_h = self.quantize_bins
        size_w, size_h = size
        per_bin_w = size_w / bins_w
        per_bin_h = size_h / bins_h

        # Add 0.5 to use the center position of the bin as the coordinate.
        if locations.shape[-1] == 4:
            # Bounding boxes
            xmin, ymin, xmax, ymax = locations.split(1, dim=-1)
            dq_xmin = (xmin + 0.5) * per_bin_w
            dq_ymin = (ymin + 0.5) * per_bin_h
            dq_xmax = (xmax + 0.5) * per_bin_w
            dq_ymax = (ymax + 0.5) * per_bin_h
            return torch.cat([dq_xmin, dq_ymin, dq_xmax, dq_ymax], dim=-1).int()
        elif locations.shape[-1] == 2:
            # Points/coordinates
            x, y = locations.split(1, dim=-1)
            dq_x = (x + 0.5) * per_bin_w
            dq_y = (y + 0.5) * per_bin_h
            return torch.cat([dq_x, dq_y], dim=-1).int()
        else:
            raise ValueError(f"Unsupported location shape: last dim must be 2 or 4, got {locations.shape[-1]}.")

    def decode_with_spans(self, token_ids: list[int]) -> tuple[str, list[tuple[int, int]]]:
        """
        Decode token IDs to text and compute character spans.

        Args:
            token_ids (`list[int]`):
                List of token IDs to decode.

        Returns:
            `tuple[str, list[tuple[int, int]]]`: Decoded text and list of spans (start, end) for each token.
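
        Example (illustrative; the token strings depend on the underlying tokenizer):

        ```python
        >>> text, spans = post_processor.decode_with_spans(token_ids)
        >>> start, end = spans[0]
        >>> first_piece = text[start:end]  # the exact substring contributed by the first token
        ```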
""" filtered_tokens = self.tokenizer.convert_ids_to_tokens(token_ids, skip_special_tokens=False) text = "" spans = [] for token in filtered_tokens: if token in self.all_special_tokens: sub_text = token else: sub_text = self.tokenizer.convert_tokens_to_string([token]) span = (len(text), len(text) + len(sub_text)) text += sub_text spans.append(span) return text, spans def parse_ocr_from_text_and_spans( self, text: str, pattern: Optional[str], image_size: tuple[int, int], area_threshold: float = 0.0 ) -> list[dict[str, Any]]: """ Parse OCR results with quadrilateral boxes. Args: text (`str`): The generated text. pattern (`str`): Regex pattern for matching. image_size (`tuple[int, int]`): Image size (width, height). area_threshold (`float`, *optional*, defaults to 0.0): Minimum area threshold for filtering boxes. Returns: `list[dict[str, Any]]`: list of instances with 'quad_box' and 'text'. """ text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "") if pattern is None: pattern = r"(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>" matches = re.findall(pattern, text) instances = [] width, height = image_size for content, *quad_str in matches: quad_bins = [int(i) for i in quad_str] quad_box = self.dequantize(torch.tensor(quad_bins).reshape(-1, 2), size=image_size).flatten().tolist() if area_threshold > 0: x_coords = quad_box[0::2] y_coords = quad_box[1::2] # Apply the Shoelace formula area = 0.5 * abs( sum(x_coords[i] * y_coords[i + 1] - x_coords[i + 1] * y_coords[i] for i in range(4 - 1)) ) if area < (width * height) * area_threshold: continue instances.append({"quad_box": quad_box, "text": content.strip()}) return instances def parse_phrase_grounding_from_text_and_spans( self, text: str, image_size: tuple[int, int] ) -> list[dict[str, Any]]: """ Parse phrase grounding results. Args: text (`str`): The generated text. image_size (`tuple[int, int]`): Image size (width, height). Returns: `list[dict[str, Any]]`: list of instances with 'bbox' and 'cat_name'. """ text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "") phrase_pattern = r"([^<]+(?:<loc_\d+>){4,})" phrases = re.findall(phrase_pattern, text) text_pattern = r"^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)" box_pattern = r"<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>" instances = [] for phrase_text in phrases: phrase_text = phrase_text.replace("<ground>", "", 1).replace("<obj>", "", 1) if not phrase_text: continue match = re.search(text_pattern, phrase_text) if not match: continue phrase = match.group().strip() if phrase in self.banned_grounding_tokens: continue boxes_matches = list(re.finditer(box_pattern, phrase_text)) if not boxes_matches: continue bbox_bins = [[int(m.group(j)) for j in range(1, 5)] for m in boxes_matches] bboxes = self.dequantize(torch.tensor(bbox_bins), size=image_size).tolist() phrase = phrase.encode("ascii", "ignore").decode("ascii") instances.append({"bbox": bboxes, "cat_name": phrase}) return instances def _find_matched_token_indices(self, cur_span: tuple[int, int], token_spans: list[tuple[int, int]]) -> list[int]: return [i for i, span in enumerate(token_spans) if not (span[1] <= cur_span[0] or span[0] >= cur_span[1])] def parse_description_with_bboxes_from_text_and_spans( self, text: str, image_size: tuple[int, int], allow_empty_phrase: bool = False, ) -> list[dict[str, Any]]: """ Parse descriptions with bounding boxes. Args: text (`str`): The generated text. image_size (`tuple[int, int]`): Image size (width, height). 
allow_empty_phrase (`bool`, *optional*, defaults to `False`): Allow phrases without text. Returns: `list[dict[str, Any]]`: list of instances with 'bbox', 'cat_name', and optional 'score'. """ text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "") if allow_empty_phrase: pattern = r"(?:(?:<loc_\d+>){4,})" else: pattern = r"([^<]+(?:<loc_\d+>){4,})" phrases = re.findall(pattern, text) text_pattern = r"^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)" box_pattern = r"<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>" instances = [] for phrase_text in phrases: phrase_text = phrase_text.replace("<ground>", "", 1).replace("<obj>", "", 1) if not phrase_text and not allow_empty_phrase: continue match = re.search(text_pattern, phrase_text) if not match: continue phrase = match.group().strip() boxes_matches = list(re.finditer(box_pattern, phrase_text)) if not boxes_matches: continue bbox_bins = [[int(m.group(j)) for j in range(1, 5)] for m in boxes_matches] bboxes = self.dequantize(torch.tensor(bbox_bins), size=image_size).tolist() phrase = phrase.encode("ascii", "ignore").decode("ascii") for bbox in bboxes: instance = {"bbox": bbox, "cat_name": phrase} instances.append(instance) return instances def parse_description_with_polygons_from_text_and_spans( self, text: str, image_size: tuple[int, int], allow_empty_phrase: bool = False, polygon_sep_token: str = "<sep>", polygon_start_token: str = "<poly>", polygon_end_token: str = "</poly>", with_box_at_start: bool = False, ) -> list[dict[str, Any]]: """ Parse descriptions with polygons. Args: text (`str`): The generated text. image_size (`tuple[int, int]`): Image size (width, height). allow_empty_phrase (`bool`, *optional*, defaults to `False`): Allow phrases without text. polygon_sep_token (`str`, *optional*, defaults to "<sep>"): Token separating polygons. polygon_start_token (`str`, *optional*, defaults to "<poly>"): Start token for polygons. polygon_end_token (`str`, *optional*, defaults to "</poly>"): End token for polygons. with_box_at_start (`bool`, *optional*, defaults to `False`): Whether a bounding box is at the start of polygons. Returns: `list[dict[str, Any]]`: list of instances with 'polygons', 'cat_name', and optional 'bbox'. 
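
        Example (illustrative; assumes a constructed `Florence2PostProcessor` and a 1000x1000 image, so each of
        the 1000 quantization bins is exactly one pixel wide):

        ```python
        >>> text = "cat<poly><loc_100><loc_100><loc_200><loc_100><loc_200><loc_200></poly>"
        >>> post_processor.parse_description_with_polygons_from_text_and_spans(text, image_size=(1000, 1000))
        [{'cat_name': 'cat', 'polygons': [[100, 100, 200, 100, 200, 200]]}]
        ```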
""" text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "") if allow_empty_phrase: pattern = rf"(?:(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})" else: pattern = rf"([^<]+(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})" phrases = re.findall(pattern, text) phrase_pattern = r"^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_|<poly>)" poly_instance_pattern = rf"{re.escape(polygon_start_token)}(.*?){re.escape(polygon_end_token)}" box_pattern = rf"((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)" instances = [] for phrase_text in phrases: phrase_text_strip = re.sub(r"^<loc_\d+>", "", phrase_text, count=1) if not phrase_text_strip and not allow_empty_phrase: continue match = re.search(phrase_pattern, phrase_text_strip) if not match: continue phrase = match.group().strip() if polygon_start_token in phrase_text and polygon_end_token in phrase_text: poly_instances = [m.group(1) for m in re.finditer(poly_instance_pattern, phrase_text)] else: poly_instances = [phrase_text] for poly_inst in poly_instances: poly_matches = list(re.finditer(box_pattern, poly_inst)) if len(poly_matches) == 0: continue bbox = [] polygons = [] for poly_match in poly_matches: poly_str = poly_match.group(1) poly_bins = [int(m.group(1)) for m in re.finditer(r"<loc_(\d+)>", poly_str)] if with_box_at_start and not bbox: if len(poly_bins) > 4: bbox = poly_bins[:4] poly_bins = poly_bins[4:] else: bbox = [0, 0, 0, 0] if len(poly_bins) % 2 == 1: poly_bins = poly_bins[:-1] poly_coords = ( self.dequantize(torch.tensor(poly_bins).reshape(-1, 2), size=image_size).flatten().tolist() ) polygons.append(poly_coords) instance = {"cat_name": phrase, "polygons": polygons} if bbox: instance["bbox"] = self.dequantize(torch.tensor([bbox]), size=image_size)[0].tolist() instances.append(instance) return instances def __call__(self, text=None, sequence=None, image_size=None, parse_tasks=None) -> dict[str, Any]: """ Process model output and parse into task-specific results. Args: text (`Optional[str]`, *optional*): Generated text. Either this or `sequence` must be provided. sequence (`Optional[Union[list[int], torch.Tensor]]`, *optional*): Token sequence. Either this or `text` must be provided. image_size (`Optional[tuple[int, int]]`, *optional*): Image size (width, height) required for dequantization. parse_tasks (`Optional[Union[str, list[str]]]`, *optional*): Specific tasks to parse. If None, parse all supported tasks. Returns: `dict[str, Any]`: Parsed results for each task, including the raw 'text'. 
""" if parse_tasks is not None: parse_tasks = [parse_tasks] if isinstance(parse_tasks, str) else parse_tasks for task in parse_tasks: if task not in self.parse_task_config.keys(): raise ValueError(f"Unsupported parse task: {task}") if (text is None and sequence is None) or (text is not None and sequence is not None): raise ValueError("Exactly one of 'text' or 'sequence' must be provided.") if sequence is not None: if isinstance(sequence, torch.Tensor): sequence = sequence.tolist() sequence = sequence[1:] if sequence[0] == self.tokenizer.bos_token_id else sequence # Skip BOS if present text, _ = self.decode_with_spans(sequence) parsed_dict = {"text": text} tasks_to_parse = parse_tasks or self.parse_task_config.keys() for task in tasks_to_parse: config = self.parse_task_config[task] pattern = config.get("PATTERN") if task == "ocr": parsed_dict["ocr"] = self.parse_ocr_from_text_and_spans( text, pattern=pattern, image_size=image_size, area_threshold=config.get("AREA_THRESHOLD", 0.0) ) elif task == "phrase_grounding": parsed_dict["phrase_grounding"] = self.parse_phrase_grounding_from_text_and_spans( text, image_size=image_size ) elif task == "pure_text": parsed_dict["pure_text"] = text elif task == "description_with_bboxes": parsed_dict["description_with_bboxes"] = self.parse_description_with_bboxes_from_text_and_spans( text, image_size=image_size ) elif task == "description_with_polygons": parsed_dict["description_with_polygons"] = self.parse_description_with_polygons_from_text_and_spans( text, image_size=image_size ) elif task == "polygons": parsed_dict["polygons"] = self.parse_description_with_polygons_from_text_and_spans( text, image_size=image_size, allow_empty_phrase=True ) elif task == "bboxes": parsed_dict["bboxes"] = self.parse_description_with_bboxes_from_text_and_spans( text, image_size=image_size, allow_empty_phrase=True ) elif task == "description_with_bboxes_or_polygons": if "<poly>" in text: instances = self.parse_description_with_polygons_from_text_and_spans(text, image_size=image_size) else: instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size) parsed_dict["description_with_bboxes_or_polygons"] = instances else: raise ValueError("task {} is not supported".format(task)) return parsed_dict class Florence2VisionDropPath(BeitDropPath): pass class Florence2VisionLearnedAbsolutePositionEmbedding2D(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, config: Florence2Config): super().__init__() num_pos = config.vision_config.max_position_embeddings embedding_dim = config.vision_config.embed_dim[-1] self.row_embeddings = nn.Embedding(num_pos, embedding_dim // 2) self.column_embeddings = nn.Embedding(num_pos, embedding_dim - (embedding_dim // 2)) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos class Florence2VisionPositionalEmbeddingCosine1D(nn.Module): """ This module generates 1D cosine positional embeddings using precomputed sinusoidal functions. 
""" def __init__(self, config: Florence2Config): super().__init__() self.embed_dim = config.vision_config.embed_dim[-1] self.max_seq_len = config.vision_config.max_temporal_embeddings pos_idx_to_embed = torch.empty((self.max_seq_len, self.embed_dim)) sine, cosine = self.get_sinusoid_embeddings( max_positions=self.max_seq_len, embed_dim=self.embed_dim, ) pos_idx_to_embed[:, 0::2] = sine pos_idx_to_embed[:, 1::2] = cosine # Save the positional embeddings in a constant buffer. self.register_buffer("pos_idx_to_embed", pos_idx_to_embed) @staticmethod def get_sinusoid_embeddings(max_positions: int, embed_dim: int): half_dim = embed_dim // 2 emb = math.log(10000) / half_dim emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) return torch.sin(emb), torch.cos(emb) def forward(self, seq_embeds: torch.Tensor) -> torch.Tensor: len_seq = seq_embeds.size(1) if len_seq > self.max_seq_len: raise ValueError(f"Maximum sequence length {self.max_seq_len}, got {len_seq}") pos_embeds = self.pos_idx_to_embed[0:len_seq, :] return pos_embeds class Florence2VisionMLP(Llama4VisionMLP): def __init__(self, config: Florence2VisionConfig, stage_idx: int): super().__init__() self.fc1 = nn.Linear(config.embed_dim[stage_idx], int(config.embed_dim[stage_idx] * config.mlp_ratio)) self.activation_fn = ACT2FN[config.activation_function] self.fc2 = nn.Linear(int(config.embed_dim[stage_idx] * config.mlp_ratio), config.embed_dim[stage_idx]) class Florence2VisionConvEmbed(nn.Module): """Image to Patch Embedding""" def __init__(self, config: Florence2VisionConfig, stage_idx: int): super().__init__() self.config = config self.stage_idx = stage_idx self.patch_size = config.patch_size[stage_idx] self.in_channels = config.in_channels if stage_idx == 0 else config.embed_dim[stage_idx - 1] self.embed_dim = config.embed_dim[stage_idx] self.stride = config.patch_stride[stage_idx] self.padding = config.patch_padding[stage_idx] self.pre_norm = config.patch_prenorm[stage_idx] self.conv = nn.Conv2d( self.in_channels, self.embed_dim, kernel_size=self.patch_size, stride=self.stride, padding=self.padding, ) dim_norm = self.in_channels if self.pre_norm else self.embed_dim self.norm = nn.LayerNorm(dim_norm) def forward(self, hidden_states: torch.Tensor): if self.norm and self.pre_norm: hidden_states = hidden_states.permute(0, 2, 3, 1) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = self.conv(hidden_states) if self.norm and not self.pre_norm: hidden_states = hidden_states.permute(0, 2, 3, 1) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 1, 2) return hidden_states class Florence2VisionChannelAttention(nn.Module): def __init__(self, config: Florence2VisionConfig, stage_idx: int): super().__init__() self.config = config self.dim = config.embed_dim[stage_idx] self.groups = config.num_groups[stage_idx] self.qkv = nn.Linear(self.dim, self.dim * 3, bias=config.qkv_bias) self.proj = nn.Linear(self.dim, self.dim) self.is_causal = False def forward(self, hidden_states: torch.Tensor): batch_size, num_tokens, hidden_size = hidden_states.shape # Reshape for grouped channel attention qkv = self.qkv(hidden_states).reshape(batch_size, num_tokens, 3, self.groups, hidden_size // self.groups) qkv = qkv.permute(2, 0, 3, 4, 1) query, key, value = qkv.unbind(0) scale = num_tokens**-0.5 # Channel-to-channel attention within groups: attention_interface: Callable = 
eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] hidden_states, _ = attention_interface( self, query, key, value, attention_mask=None, scaling=scale, ) hidden_states = hidden_states.permute(0, 3, 2, 1) hidden_states = hidden_states.reshape(batch_size, num_tokens, hidden_size) # Final projection hidden_states = self.proj(hidden_states) return hidden_states class Florence2VisionChannelBlock(nn.Module): def __init__( self, config: Florence2VisionConfig, stage_idx: int, drop_path_rate: float, ): super().__init__() self.config = config dim_in = config.embed_dim[stage_idx] self.conv1 = nn.Conv2d( dim_in, dim_in, kernel_size=3, padding=1, groups=dim_in, ) self.norm1 = nn.LayerNorm(config.embed_dim[stage_idx]) self.channel_attn = Florence2VisionChannelAttention(config=config, stage_idx=stage_idx) self.drop_path1 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.conv2 = nn.Conv2d( dim_in, dim_in, kernel_size=3, padding=1, groups=dim_in, ) self.norm2 = nn.LayerNorm(config.embed_dim[stage_idx]) self.ffn = Florence2VisionMLP(config=config, stage_idx=stage_idx) self.drop_path2 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() def forward(self, hidden_states: torch.Tensor): batch_size, embed_dim, height, width = hidden_states.shape # First channel block: Depthwise Conv + Channel Attention hidden_states = self.conv1(hidden_states) + hidden_states hidden_states = hidden_states.flatten(2).transpose(1, 2) residual = hidden_states # Channel group attention self-attention mechanism hidden_states = self.norm1(hidden_states) hidden_states = self.channel_attn(hidden_states) hidden_states = residual + self.drop_path1(hidden_states) hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width) # Second channel block: Depthwise Conv + FFN hidden_states = self.conv2(hidden_states) + hidden_states hidden_states = hidden_states.flatten(2).transpose(1, 2) residual = hidden_states # FFN hidden_states = self.norm2(hidden_states) hidden_states = self.ffn(hidden_states) hidden_states = residual + self.drop_path2(hidden_states) hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width) return hidden_states class Florence2VisionWindowAttention(nn.Module): def __init__(self, config: Florence2VisionConfig, stage_idx: int): super().__init__() self.config = config self.dim = config.embed_dim[stage_idx] self.window_size = config.window_size self.num_heads = config.num_heads[stage_idx] head_dim = self.dim // self.num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(self.dim, self.dim * 3, bias=config.qkv_bias) self.proj = nn.Linear(self.dim, self.dim) self.is_causal = False def forward(self, hidden_states: torch.Tensor): batch_size, height, width, embed_dim = hidden_states.shape # Pad the input if necessary pad_left = pad_top = 0 pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size hidden_states = F.pad(hidden_states, (0, 0, pad_left, pad_right, pad_top, pad_bottom)) _, padded_height, padded_width, _ = hidden_states.shape # Partition input into non-overlapping windows (for local spatial attention in DaViT) hidden_states = hidden_states.view( batch_size, padded_height // self.window_size, self.window_size, padded_width // self.window_size, self.window_size, embed_dim, ) windowed_hidden_states = 
hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous() windowed_hidden_states = windowed_hidden_states.view(-1, self.window_size * self.window_size, embed_dim) # Generate Q, K, V for each window num_windows_per_batch, num_tokens_per_window, embed_dim = windowed_hidden_states.shape qkv = self.qkv(windowed_hidden_states).reshape( num_windows_per_batch, num_tokens_per_window, 3, self.num_heads, embed_dim // self.num_heads ) qkv = qkv.permute(2, 0, 3, 1, 4) query, key, value = qkv.unbind(0) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] windowed_hidden_states, _ = attention_interface( self, query, key, value, attention_mask=None, scaling=self.scale, ) windowed_hidden_states = windowed_hidden_states.view(num_windows_per_batch, num_tokens_per_window, embed_dim) windowed_hidden_states = self.proj(windowed_hidden_states) # Merge windows back to original spatial layout windowed_hidden_states = windowed_hidden_states.view(-1, self.window_size, self.window_size, embed_dim) hidden_states = windowed_hidden_states.view( -1, padded_height // self.window_size, padded_width // self.window_size, self.window_size, self.window_size, embed_dim, ) hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous() hidden_states = hidden_states.view(-1, padded_height, padded_width, embed_dim) hidden_states = hidden_states[:, :height, :width, :].contiguous() hidden_states = hidden_states.view(batch_size, height * width, embed_dim) return hidden_states class Florence2VisionSpatialBlock(nn.Module): def __init__( self, config: Florence2VisionConfig, stage_idx: int, drop_path_rate: float, ): super().__init__() self.conv1 = nn.Conv2d( config.embed_dim[stage_idx], config.embed_dim[stage_idx], kernel_size=3, padding=1, groups=config.embed_dim[stage_idx], ) self.norm1 = nn.LayerNorm(config.embed_dim[stage_idx]) self.window_attn = Florence2VisionWindowAttention(config=config, stage_idx=stage_idx) self.drop_path1 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.conv2 = nn.Conv2d( config.embed_dim[stage_idx], config.embed_dim[stage_idx], kernel_size=3, padding=1, groups=config.embed_dim[stage_idx], ) self.norm2 = nn.LayerNorm(config.embed_dim[stage_idx]) self.ffn = Florence2VisionMLP(config=config, stage_idx=stage_idx) self.drop_path2 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() def forward(self, hidden_states: torch.Tensor): batch_size, embed_dim, height, width = hidden_states.shape # First spatial mixing block: Conv + Window Attention hidden_states = self.conv1(hidden_states) + hidden_states hidden_states = hidden_states.flatten(2).transpose(1, 2) residual = hidden_states # Spatial Window-based self-attention mechanism hidden_states = self.norm1(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, embed_dim) hidden_states = self.window_attn(hidden_states) hidden_states = residual + self.drop_path1(hidden_states) hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width) # Second spatial mixing block: Conv + FFN hidden_states = self.conv2(hidden_states) + hidden_states hidden_states = hidden_states.flatten(2).transpose(1, 2) residual = hidden_states # FFN hidden_states = self.norm2(hidden_states) hidden_states = self.ffn(hidden_states) hidden_states = residual + self.drop_path2(hidden_states) hidden_states = hidden_states.transpose(1, 2).view(batch_size, 
embed_dim, height, width) return hidden_states class Florence2VisionBlock(nn.Module): def __init__( self, config: Florence2VisionConfig, stage_idx: int, spatial_drop_path_rate: float, channel_drop_path_rate: float, ): super().__init__() self.spatial_block = Florence2VisionSpatialBlock( config=config, stage_idx=stage_idx, drop_path_rate=spatial_drop_path_rate, ) self.channel_block = Florence2VisionChannelBlock( config=config, stage_idx=stage_idx, drop_path_rate=channel_drop_path_rate, ) def forward(self, hidden_states: torch.Tensor): hidden_states = self.spatial_block(hidden_states) hidden_states = self.channel_block(hidden_states) return hidden_states @auto_docstring class Florence2VisionPreTrainedModel(PreTrainedModel): config_class = Florence2VisionConfig main_input_name = "pixel_values" _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _can_compile_fullgraph = True @auto_docstring class Florence2VisionBackbone(Florence2VisionPreTrainedModel): def __init__(self, config: Florence2VisionConfig): super().__init__(config) self.config = config self.embed_dim = config.embed_dim self.num_heads = config.num_heads self.num_groups = config.num_groups self.num_stages = len(self.embed_dim) if not (self.num_stages == len(self.num_heads) == len(self.num_groups)): raise ValueError( f"Expected self.num_stages ({self.num_stages}) == " f"len(self.num_heads) ({len(self.num_heads)}) == " f"len(self.num_groups) ({len(self.num_groups)})" ) dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths) * 2, device="cpu")] depth_offset = 0 convs = [] blocks = [] for stage_idx in range(self.num_stages): conv_embed = Florence2VisionConvEmbed( config=config, stage_idx=stage_idx, ) convs.append(conv_embed) block = nn.ModuleList( Florence2VisionBlock( config=config, stage_idx=stage_idx, spatial_drop_path_rate=dpr[depth_offset + block_idx * 2], channel_drop_path_rate=dpr[depth_offset + block_idx * 2 + 1], ) for block_idx in range(config.depths[stage_idx]) ) blocks.append(block) depth_offset += config.depths[stage_idx] * 2 self.convs = nn.ModuleList(convs) self.blocks = nn.ModuleList(blocks) # Initialize weights and apply final processing self.post_init() def forward(self, hidden_states: torch.Tensor): for conv, block in zip(self.convs, self.blocks): hidden_states = conv(hidden_states) for layer in block: hidden_states = layer(hidden_states) return hidden_states class Florence2MultiModalProjector(nn.Module): def __init__(self, config: Florence2Config): super().__init__() self.vision_embedding_dim = config.vision_config.embed_dim[-1] self.vision_projection_dim = config.vision_config.projection_dim self.image_projection = nn.Linear(self.vision_embedding_dim, self.vision_projection_dim, bias=False) self.image_proj_norm = nn.LayerNorm(self.vision_projection_dim) self.image_position_embed = Florence2VisionLearnedAbsolutePositionEmbedding2D(config=config) self.visual_temporal_embed = Florence2VisionPositionalEmbeddingCosine1D(config=config) def forward(self, image_features): position_features = image_features + self.image_position_embed(image_features) position_features = position_features.flatten(2).transpose(1, 2) temporal_features = self.visual_temporal_embed(position_features[:, :1, :]) temporal_features = temporal_features.unsqueeze(1) visual_token_features = position_features + temporal_features visual_token_features = visual_token_features.unsqueeze(1) spatial_image_features = visual_token_features.mean(dim=2) temporal_image_features = visual_token_features.mean(dim=1) 
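        # Note: `visual_token_features` is (batch, 1, num_tokens, dim) at this point. Averaging over dim=2 pools
        # the spatial tokens into a single global token per image, while averaging over the singleton dim=1 keeps
        # the per-token features unchanged; concatenating along dim=1 yields 1 + num_tokens visual tokens per image.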
        image_features = torch.cat([spatial_image_features, temporal_image_features], dim=1)
        image_features = self.image_projection(image_features)
        image_features = self.image_proj_norm(image_features)
        return image_features


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Florence-2 base model's outputs that also contains: pre-computed hidden states that can speed up
    sequential decoding.
    """
)
class Florence2Seq2SeqModelOutput(Seq2SeqModelOutput):
    r"""
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_image_tokens, hidden_size)`. image_hidden_states of the model
        produced by the vision encoder and after projecting the last hidden state.
    """

    image_hidden_states: Optional[torch.FloatTensor] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Florence-2 model's outputs that also contains: pre-computed hidden states that can speed up
    sequential decoding.
    """
)
class Florence2Seq2SeqLMOutput(Seq2SeqLMOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_image_tokens, hidden_size)`. image_hidden_states of the model
        produced by the vision encoder and after projecting the last hidden state.
    """

    image_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None


@auto_docstring
class Florence2PreTrainedModel(LlavaPreTrainedModel):
    config_class = Florence2Config
    _supports_attention_backend = False


@auto_docstring(
    custom_intro="""
    Florence-2 is a vision model for captioning, detection, and segmentation.
    """
)
class Florence2Model(LlavaModel):
    _checkpoint_conversion_mapping = {}
    _tied_weights_keys = [
        "language_model.encoder.embed_tokens.weight",
        "language_model.decoder.embed_tokens.weight",
    ]

    def __init__(self, config: Florence2Config):
        super().__init__(config)
        self.vision_tower = Florence2VisionBackbone(config=config.vision_config)

    def get_encoder(self):
        return self.language_model.get_encoder()

    def get_decoder(self):
        return self.language_model.get_decoder()

    def get_image_features(self, pixel_values: torch.Tensor, **kwargs):
        """
        Obtains image last hidden states from the vision tower and applies the multimodal projection.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
                The tensors corresponding to the input images.

        Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
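
        Example (a minimal sketch; the spatial size and the resulting `image_length` depend on the checkpoint's
        vision config):

        ```python
        >>> pixel_values = torch.randn(2, 3, 768, 768)
        >>> image_features = model.get_image_features(pixel_values)
        >>> image_features.shape[0]
        2
        ```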
""" image_features = self.vision_tower(pixel_values, **kwargs) image_embeds = self.multi_modal_projector(image_features) return image_embeds @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, Florence2Seq2SeqModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_features = self.get_image_features(pixel_values) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) encoder_outputs = self.language_model.encoder( attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) if decoder_input_ids is None: decoder_start_token_id = self.config.text_config.decoder_start_token_id decoder_input_ids = torch.ones((inputs_embeds.size()[0], 1), dtype=torch.long, device=inputs_embeds.device) decoder_input_ids *= decoder_start_token_id decoder_outputs = self.language_model.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=True, **kwargs, ) return Florence2Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, ) @auto_docstring( 
custom_intro=""" Florence-2 is a vision model for captioning, detection, and segmentation. """ ) class Florence2ForConditionalGeneration(LlavaForConditionalGeneration): _checkpoint_conversion_mapping = {} _tied_weights_keys = [ "model.language_model.encoder.embed_tokens.weight", "model.language_model.decoder.embed_tokens.weight", "lm_head.weight", ] def get_encoder(self): return self.model.get_encoder() def get_image_features(self, pixel_values: torch.Tensor, **kwargs): return self.model.get_image_features(pixel_values=pixel_values, **kwargs) @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Florence2Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Florence2ForConditionalGeneration >>> model = Florence2ForConditionalGeneration.from_pretrained("microsoft/Florence-2-large") >>> processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large") >>> prompt = "<CAPTION>" >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=prompt, images=image, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_length=100) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "A green car parked in front of a yellow building." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return Florence2Seq2SeqLMOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, image_hidden_states=outputs.image_hidden_states, ) def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor ): return self.model.get_placeholder_mask( input_ids=input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str], generation_config, ) -> dict[str, Any]: # override to handle merging image and text embeddings before passing to language encoder inputs_embeds = model_kwargs.pop("inputs_embeds", None) pixel_values = model_kwargs.pop("pixel_values", None) if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(inputs_tensor) if pixel_values is not None: image_features = self.get_image_features(pixel_values) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( inputs_tensor, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) model_kwargs["inputs_embeds"] = inputs_embeds model_kwargs = super()._prepare_encoder_decoder_kwargs_for_generation( None, model_kwargs, model_input_name, generation_config ) model_kwargs.pop("inputs_embeds", None) return model_kwargs __all__ = [ "Florence2Config", "Florence2Processor", "Florence2VisionConfig", "Florence2Model", "Florence2ForConditionalGeneration", "Florence2PreTrainedModel", "Florence2VisionBackbone", "Florence2VisionPreTrainedModel", ]
transformers/src/transformers/models/florence2/modular_florence2.py/0
# coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for FSMT."""

import json
import os
import re
import unicodedata
from typing import Optional

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "src_vocab_file": "vocab-src.json",
    "tgt_vocab_file": "vocab-tgt.json",
    "merges_file": "merges.txt",
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
    strings)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def replace_unicode_punct(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
    """
    text = text.replace("，", ",")
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace("：", ":")
    text = text.replace("？", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace("）", ")")
    text = text.replace("！", "!")
    text = text.replace("（", "(")
    text = text.replace("；", ";")
    text = text.replace("１", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("０", "0")
    text = text.replace("３", "3")
    text = text.replace("２", "2")
    text = text.replace("５", "5")
    text = text.replace("６", "6")
    text = text.replace("９", "9")
    text = text.replace("７", "7")
    text = text.replace("８", "8")
    text = text.replace("４", "4")
    text = re.sub(r"．\s*", ". ", text)
    text = text.replace("～", "~")
    text = text.replace("’", "'")
    text = text.replace("…", "...")
    text = text.replace("━", "-")
    text = text.replace("〈", "<")
    text = text.replace("〉", ">")
    text = text.replace("【", "[")
    text = text.replace("】", "]")
    text = text.replace("％", "%")
    return text


def remove_non_printing_char(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
    """
    output = []
    for char in text:
        cat = unicodedata.category(char)
        if cat.startswith("C"):
            continue
        output.append(char)
    return "".join(output)


# Porting notes:
# this one is modeled after XLMTokenizer
#
# added:
# - src_vocab_file,
# - tgt_vocab_file,
# - langs,


class FSMTTokenizer(PreTrainedTokenizer):
    """
    Construct a FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:

    - Moses preprocessing and tokenization.
    - Normalizing all input texts.
    - The argument `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
      "__classify__") to a vocabulary.
    - The argument `langs` defines a pair of languages.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        langs (`List[str]`, *optional*):
            A list of two languages to translate from and to, for instance `["en", "ru"]`.
        src_vocab_file (`str`, *optional*):
            File containing the vocabulary for the source language.
        tgt_vocab_file (`str`, *optional*):
            File containing the vocabulary for the target language.
        merges_file (`str`, *optional*):
            File containing the merges.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        langs=None,
        src_vocab_file=None,
        tgt_vocab_file=None,
        merges_file=None,
        do_lower_case=False,
        unk_token="<unk>",
        bos_token="<s>",
        sep_token="</s>",
        pad_token="<pad>",
        **kwargs,
    ):
        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use FSMTTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses

        self.src_vocab_file = src_vocab_file
        self.tgt_vocab_file = tgt_vocab_file
        self.merges_file = merges_file
        self.do_lower_case = do_lower_case

        # cache of sm.MosesPunctNormalizer instance
        self.cache_moses_punct_normalizer = {}
        # cache of sm.MosesTokenizer instance
        self.cache_moses_tokenizer = {}
        self.cache_moses_detokenizer = {}

        if langs and len(langs) == 2:
            self.src_lang, self.tgt_lang = langs
        else:
            raise ValueError(
                f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
                "Usually that means that the tokenizer can't find a mapping for the given model path "
                "in the pretrained vocab maps of this tokenizer."
            )

        with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
            self.encoder = json.load(src_vocab_handle)
        with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
            tgt_vocab = json.load(tgt_vocab_handle)
            self.decoder = {v: k for k, v in tgt_vocab.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

        super().__init__(
            langs=langs,
            src_vocab_file=src_vocab_file,
            tgt_vocab_file=tgt_vocab_file,
            merges_file=merges_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            bos_token=bos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            **kwargs,
        )

    # hack override
    def get_vocab(self) -> dict[str, int]:
        return self.get_src_vocab()

    # hack override
    @property
    def vocab_size(self) -> int:
        return self.src_vocab_size

    def moses_punct_norm(self, text, lang):
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        return self.cache_moses_punct_normalizer[lang].normalize(text)

    def moses_tokenize(self, text, lang):
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        return self.cache_moses_tokenizer[lang].tokenize(
            text, aggressive_dash_splits=True, return_str=False, escape=True
        )

    def moses_detokenize(self, tokens, lang):
        if lang not in self.cache_moses_detokenizer:
            moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
            self.cache_moses_detokenizer[lang] = moses_detokenizer
        return self.cache_moses_detokenizer[lang].detokenize(tokens)

    def moses_pipeline(self, text, lang):
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    @property
    def src_vocab_size(self):
        return len(self.encoder)

    @property
    def tgt_vocab_size(self):
        return len(self.decoder)

    def get_src_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def get_tgt_vocab(self):
        return dict(self.decoder, **self.added_tokens_decoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            word = "\n</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang="en", bypass_tokenizer=False):
        """
        Tokenize a string given a language code, using Moses.

        Details of tokenization:

            - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
            - Install with `pip install sacremoses`

        Args:
            - lang: ISO language code (default = 'en') (string). The language should be one of the model's supported
              languages; however, this is not enforced.
            - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
              (bool).
              If True, we only apply BPE.

        Returns:
            List of tokens.
        """
        # ignore `lang`, which currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
        # if lang != self.src_lang:
        #     raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
        lang = self.src_lang

        if self.do_lower_case:
            text = text.lower()

        if bypass_tokenizer:
            text = text.split()
        else:
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        # remove BPE
        tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
        tokens = "".join(tokens).split()
        # detokenize
        text = self.moses_detokenize(tokens, self.tgt_lang)
        return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A FAIRSEQ Transformer sequence has the following format (no bos is used in
        fairseq):

        - single sequence: `X </s>`
        - pair of sequences: `A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]

        # no bos used in fairseq
        if token_ids_1 is None:
            return token_ids_0 + sep
        return token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
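
        Example (illustrative; assumes a loaded `FSMTTokenizer` instance):

        ```python
        >>> tokenizer.get_special_tokens_mask([5, 6], [7, 8])
        [0, 0, 1, 0, 0, 1]
        ```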
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # no bos used in fairseq if token_ids_1 is not None: return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return ([0] * len(token_ids_0)) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return src_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"] ) tgt_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"] ) merges_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") with open(tgt_vocab_file, "w", encoding="utf-8") as f: tgt_vocab = {v: k for k, v in self.decoder.items()} f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merges_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return src_vocab_file, tgt_vocab_file, merges_file def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses __all__ = ["FSMTTokenizer"]
transformers/src/transformers/models/fsmt/tokenization_fsmt.py/0
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import warnings

import torch
from accelerate import init_empty_weights

from transformers import GemmaConfig, GemmaForCausalLM, GemmaTokenizer


try:
    from transformers import GemmaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    GemmaTokenizerFast = None

"""
Sample usage:

```
python src/transformers/models/gemma/convert_gemma_weights_to_hf.py \
    --input_checkpoint /path/to/downloaded/gemma/weights --model_size 7B --output_dir /output/path
```

Thereafter, models can be loaded via:

```py
from transformers import GemmaForCausalLM, GemmaTokenizerFast

model = GemmaForCausalLM.from_pretrained("/output/path")
tokenizer = GemmaTokenizerFast.from_pretrained("/output/path")
```

Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest
versions come in several checkpoints, each checkpoint contains a part of every weight of the model, so we need to
load them all in RAM).
"""

gemma_2b_config = GemmaConfig(
    num_hidden_layers=18,
    num_attention_heads=8,
    num_key_value_heads=1,
    hidden_size=2048,
    intermediate_size=16384,
)

gemma_7b_config = GemmaConfig()

CONFIG_MAPPING = {"2B": gemma_2b_config, "7B": gemma_7b_config}
LAYER_NAME_MAPPING = {"embedder.weight": "model.embed_tokens.weight"}


def write_model(save_path, input_base_path, config, safe_serialization=True, push_to_hub=False, dtype=torch.float32):
    num_attn_heads = config.num_attention_heads
    hidden_size = config.hidden_size
    num_kv_heads = config.num_key_value_heads
    head_dim = config.head_dim

    print(f"Fetching all parameters from the checkpoint at '{input_base_path}'")
    model_state_dict = torch.load(input_base_path, map_location="cpu", weights_only=True)["model_state_dict"]
    model_state_dict.pop("freqs_cis")

    state_dict = {}
    for k, v in model_state_dict.items():
        if "qkv_proj" in k:
            if num_kv_heads == 1:
                v = v.reshape(num_attn_heads + num_kv_heads * 2, head_dim, hidden_size)
                q_proj = v[:num_attn_heads, ...]
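                # In the fused checkpoint the rows are stacked as [query heads | key head | value head]
                # along dim 0, so the slices below pick K and V out of the remaining rows.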
                k_proj = v[num_attn_heads : num_attn_heads + num_kv_heads, ...].repeat(num_kv_heads, 1, 1)
                v_proj = v[-num_kv_heads:, ...].repeat(num_kv_heads, 1, 1)

                state_dict[k.replace("qkv_proj", "q_proj")] = q_proj.reshape(
                    num_attn_heads * head_dim, hidden_size
                ).clone()
                state_dict[k.replace("qkv_proj", "k_proj")] = k_proj.reshape(
                    num_kv_heads * head_dim, hidden_size
                ).clone()
                state_dict[k.replace("qkv_proj", "v_proj")] = v_proj[0].clone()
            else:
                q_proj, k_proj, v_proj = torch.split(v, v.shape[0] // 3, 0)
                state_dict[k.replace("qkv_proj", "q_proj")] = q_proj.reshape(
                    num_attn_heads * head_dim, hidden_size
                ).clone()
                state_dict[k.replace("qkv_proj", "k_proj")] = k_proj.reshape(
                    num_kv_heads * head_dim, hidden_size
                ).clone()
                state_dict[k.replace("qkv_proj", "v_proj")] = v_proj.clone()

        elif k == "embedder.weight":
            state_dict[LAYER_NAME_MAPPING[k]] = v
            state_dict["lm_head.weight"] = v
        else:
            state_dict[k] = v

    torch.set_default_dtype(dtype)
    print("Loading the checkpoint in a Gemma model.")
    with init_empty_weights():
        model = GemmaForCausalLM(config)
    model.load_state_dict(state_dict, assign=True, strict=False)
    model.config.dtype = torch.float32
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    if push_to_hub:
        print(f"pushing the model to {save_path}")
        model.push_to_hub(save_path, safe_serialization=safe_serialization, private=True)
    else:
        model.save_pretrained(save_path, safe_serialization=safe_serialization)


def write_tokenizer(input_tokenizer_path, save_path, push_to_hub=False):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {save_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    if push_to_hub:
        tokenizer.push_to_hub(save_path)
    else:
        tokenizer.save_pretrained(save_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_checkpoint",
        help="Absolute path to the target Gemma weights.",
        required=True,
    )
    parser.add_argument(
        "--tokenizer_checkpoint",
        help="Location of Gemma tokenizer model",
    )
    parser.add_argument(
        "--model_size",
        default="7B",
        choices=["2B", "7B", "tokenizer_only"],
        help="Size of the model to convert, or 'tokenizer_only' to convert just the tokenizer. For more details on Gemma, check out the original repo: https://huggingface.co/google/gemma-7b",
    )
    parser.add_argument(
        "--output_dir",
        default="google/gemma-7b",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument(
        "--pickle_serialization",
        help="Whether to save the model with pickle (`torch.save`) instead of `safetensors`.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--convert_tokenizer",
        help="Whether or not to convert the tokenizer as well.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--push_to_hub",
        help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--dtype",
        default="float32",
        help="Target dtype of the converted model",
    )
    args = parser.parse_args()

    if args.convert_tokenizer:
        if args.tokenizer_checkpoint is None:
            raise ValueError("Path to the tokenizer is required when passing --convert_tokenizer")

        spm_path = os.path.join(args.tokenizer_checkpoint)
        write_tokenizer(spm_path, args.output_dir, args.push_to_hub)

    if args.model_size != "tokenizer_only":
        config = CONFIG_MAPPING[args.model_size]
        dtype = getattr(torch, args.dtype)
        write_model(
            config=config,
            input_base_path=args.input_checkpoint,
            save_path=args.output_dir,
            safe_serialization=not args.pickle_serialization,
            push_to_hub=args.push_to_hub,
            dtype=dtype,
        )


if __name__ == "__main__":
    main()
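# Illustrative sketch of the fused-weight split performed in the multi-head (7B) branch
# of `write_model` above: `qkv_proj` stacks the query, key and value projections along
# dim 0, so splitting it into three equal chunks recovers them. The toy sizes below are
# assumptions chosen only for the demonstration.
import torch

toy_heads, toy_head_dim, toy_hidden = 2, 4, 8
fused_qkv = torch.randn(3 * toy_heads * toy_head_dim, toy_hidden)
q_w, k_w, v_w = torch.split(fused_qkv, fused_qkv.shape[0] // 3, 0)
assert q_w.shape == k_w.shape == v_w.shape == (toy_heads * toy_head_dim, toy_hidden)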
transformers/src/transformers/models/gemma/convert_gemma_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/gemma/convert_gemma_weights_to_hf.py", "repo_id": "transformers", "token_count": 3117 }
486
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/gemma3/modular_gemma3.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_gemma3.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from collections.abc import Callable from dataclasses import dataclass from typing import Optional, Union import torch import torch.nn as nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PretrainedConfig from ...generation import GenerationMixin from ...masking_utils import create_causal_mask, create_masks_for_generate, create_sliding_window_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from ..auto import AutoModel from .configuration_gemma3 import Gemma3Config, Gemma3TextConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for Gemma3 outputs, with hidden states and attentions. """ ) class Gemma3ModelOutputWithPast(BaseModelOutputWithPast): r""" past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ image_hidden_states: Optional[torch.FloatTensor] = None @dataclass @auto_docstring( custom_intro=""" Base class for Gemma3 causal language model (or autoregressive) outputs. """ ) class Gemma3CausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). 
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder after projecting last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None class Gemma3TextScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: float = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.register_buffer("embed_scale", torch.tensor(embed_scale), persistent=False) def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale.to(self.weight.dtype) class Gemma3MLP(nn.Module): def __init__(self, config: Gemma3TextConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_activation] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj class Gemma3RMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.zeros(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()) # Llama does x.to(float16) * w whilst Gemma3 is (x * w).to(float16) # See https://github.com/huggingface/transformers/pull/29402 output = output * (1.0 + self.weight.float()) return output.type_as(x) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.eps}" class Gemma3RotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Gemma3TextConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = 
self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], dropout: float = 0.0, scaling: Optional[float] = None, softcap: Optional[float] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: if scaling is None: scaling = module.head_dim**-0.5 key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if softcap is not None: attn_weights = attn_weights / softcap attn_weights = torch.tanh(attn_weights) attn_weights = attn_weights * softcap if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Gemma3Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Gemma3TextConfig, layer_idx: int): super().__init__() self.is_sliding = config.layer_types[layer_idx] == "sliding_attention" self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = config.query_pre_attn_scalar**-0.5 self.attention_dropout = self.config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.attn_logit_softcapping = self.config.attn_logit_softcapping self.sliding_window = config.sliding_window if self.is_sliding else None self.q_norm = Gemma3RMSNorm(dim=config.head_dim, eps=config.rms_norm_eps) self.k_norm = Gemma3RMSNorm(dim=config.head_dim, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: torch.Tensor, attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = 
self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) query_states = self.q_norm(query_states) key_states = self.k_norm(key_states) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=self.attention_dropout if self.training else 0.0, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Gemma3DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Gemma3TextConfig, layer_idx: int): super().__init__() self.config = config self.hidden_size = config.hidden_size self.layer_idx = layer_idx self.attention_type = config.layer_types[layer_idx] self.self_attn = Gemma3Attention(config=config, layer_idx=layer_idx) self.mlp = Gemma3MLP(config) self.input_layernorm = Gemma3RMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Gemma3RMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.pre_feedforward_layernorm = Gemma3RMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Gemma3RMSNorm(self.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings_global: torch.Tensor, position_embeddings_local: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # apply global RoPE to non-sliding layer only if self.self_attn.is_sliding: position_embeddings = position_embeddings_local else: position_embeddings = position_embeddings_global hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.pre_feedforward_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = self.post_feedforward_layernorm(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return 
outputs @auto_docstring class Gemma3PreTrainedModel(PreTrainedModel): config: Gemma3Config base_model_prefix = "" supports_gradient_checkpointing = True _no_split_modules = [ "Gemma3DecoderLayer", "SiglipVisionEmbeddings", "SiglipEncoderLayer", "SiglipMultiheadAttentionPoolingHead", ] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": Gemma3DecoderLayer, "attentions": Gemma3Attention, } def _init_weights(self, module): super()._init_weights(module) if isinstance(module, Gemma3MultiModalProjector): module.mm_input_projection_weight.data.zero_() @auto_docstring class Gemma3TextModel(Gemma3PreTrainedModel): config: Gemma3TextConfig def __init__(self, config: Gemma3TextConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size # Gemma3 downcasts the below to bfloat16, causing sqrt(3072)=55.4256 to become 55.5. See https://github.com/huggingface/transformers/pull/29402 self.embed_tokens = Gemma3TextScaledWordEmbedding( config.vocab_size, config.hidden_size, self.padding_idx, embed_scale=self.config.hidden_size**0.5 ) self.layers = nn.ModuleList( [Gemma3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Gemma3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Gemma3RotaryEmbedding(config=config) self.gradient_checkpointing = False # TODO: raushan fix this after RoPE refactor. For now we hack it by reassigning thetas # when we want to create a local RoPE layer. Config defaults should hold values for global RoPE config = copy.deepcopy(config) config.rope_theta = config.rope_local_base_freq config.rope_scaling = {"rope_type": "default"} self.rotary_emb_local = Gemma3RotaryEmbedding(config=config) # Initialize weights and apply final processing self.post_init() @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None and not self.training: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device, ) if position_ids is None: position_ids = cache_position.unsqueeze(0) # It may already have been prepared by e.g. `generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } # embed positions hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings_global = self.rotary_emb(hidden_states, position_ids) position_embeddings_local = self.rotary_emb_local(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, position_embeddings_global=position_embeddings_global, position_embeddings_local=position_embeddings_local, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @auto_docstring class Gemma3ForCausalLM(Gemma3PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} config: Gemma3TextConfig base_model_prefix = "language_model" def __init__(self, config: Gemma3TextConfig): super().__init__(config) self.model = Gemma3TextModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> CausalLMOutputWithPast: r""" Example: 
```python >>> from transformers import AutoTokenizer, Gemma3ForCausalLM >>> model = Gemma3ForCausalLM.from_pretrained("google/gemma-2-9b") >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b") >>> prompt = "What is your favorite condiment?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is your favorite condiment?" ```""" if self.training and self.config._attn_implementation != "eager": logger.warning_once( "It is strongly recommended to train Gemma3 models with the `eager` attention implementation " f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`." ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) if self.config.final_logit_softcapping is not None: logits = logits / self.config.final_logit_softcapping logits = torch.tanh(logits) logits = logits * self.config.final_logit_softcapping loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class Gemma3MultiModalProjector(nn.Module): def __init__(self, config: Gemma3Config): super().__init__() self.mm_input_projection_weight = nn.Parameter( torch.zeros(config.vision_config.hidden_size, config.text_config.hidden_size) ) self.mm_soft_emb_norm = Gemma3RMSNorm( config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps ) self.patches_per_image = int(config.vision_config.image_size // config.vision_config.patch_size) self.tokens_per_side = int(config.mm_tokens_per_image**0.5) self.kernel_size = self.patches_per_image // self.tokens_per_side self.avg_pool = nn.AvgPool2d(kernel_size=self.kernel_size, stride=self.kernel_size) def forward(self, vision_outputs: torch.Tensor): batch_size, _, seq_length = vision_outputs.shape reshaped_vision_outputs = vision_outputs.transpose(1, 2) reshaped_vision_outputs = reshaped_vision_outputs.reshape( batch_size, seq_length, self.patches_per_image, self.patches_per_image ) reshaped_vision_outputs = reshaped_vision_outputs.contiguous() pooled_vision_outputs = self.avg_pool(reshaped_vision_outputs) pooled_vision_outputs = pooled_vision_outputs.flatten(2) pooled_vision_outputs = pooled_vision_outputs.transpose(1, 2) normed_vision_outputs = self.mm_soft_emb_norm(pooled_vision_outputs) 
        projected_vision_outputs = torch.matmul(normed_vision_outputs, self.mm_input_projection_weight)
        return projected_vision_outputs.type_as(vision_outputs)


def token_type_ids_mask_function(
    token_type_ids: Optional[torch.Tensor],
    image_group_ids: Optional[torch.Tensor],
    tokens_per_image: int,
) -> Optional[Callable]:
    """
    This function adds the correct offsets to the `q_idx` and `kv_idx` as the torch API can only accept lengths,
    not start and end indices.
    """
    # Do not return an additional mask in this case
    if token_type_ids is None:
        return None

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # If it's 1 for both query and key/value, we are in an image block
        # NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length
        # Since vmap doesn't support `if statement` we workaround it with `torch.where`
        safe_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
        token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_idx]
        token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)

        image_group_ids_at_kv_idx = image_group_ids[batch_idx, safe_idx]
        image_group_ids_at_kv_idx = torch.where(kv_idx < image_group_ids.shape[1], image_group_ids_at_kv_idx, -1)

        is_image_block = (token_type_ids[batch_idx, q_idx] == 1) & (token_type_ids_at_kv_idx == 1)
        same_image_block = image_group_ids[batch_idx, q_idx] == image_group_ids_at_kv_idx

        # This is bidirectional attention whenever we are dealing with image tokens
        return is_image_block & same_image_block

    return inner_mask


@auto_docstring(
    custom_intro="""
    The base Gemma3 model, which consists of a vision backbone and a language model without a language modeling head.
    """
)
class Gemma3Model(Gemma3PreTrainedModel):
    _checkpoint_conversion_mapping = {"language_model.model": "language_model"}
    # we are filtering the logits/labels so we shouldn't divide the loss based on num_items_in_batch
    accepts_loss_kwargs = False

    def __init__(self, config: Gemma3Config):
        super().__init__(config)
        self.vision_tower = AutoModel.from_config(config=config.vision_config)

        self.multi_modal_projector = Gemma3MultiModalProjector(config)
        self.vocab_size = config.text_config.vocab_size

        language_model = AutoModel.from_config(config=config.text_config)
        self.language_model = language_model

        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def set_decoder(self, decoder):
        self.language_model = decoder

    def get_decoder(self):
        return self.language_model

    def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """
        Projects the last hidden state from the vision model into language model space.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
                The tensors corresponding to the input images.

        Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
""" vision_outputs = self.vision_tower(pixel_values=pixel_values).last_hidden_state image_features = self.multi_modal_projector(vision_outputs) return image_features def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor ): """ Obtains multimodal placeholdr mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. """ if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) n_image_features = image_features.shape[0] * image_features.shape[1] if inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) return special_image_mask @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, token_type_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **lm_kwargs, ) -> Union[tuple, Gemma3ModelOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma32-3b-mix-224") >>> processor = AutoProcessor.from_pretrained("google/gemma32-3b-mix-224") >>> prompt = "Where is the cat standing?" 
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs,) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Where is the cat standing?\nsnow" ```""" if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Replace image id with PAD if the image token if OOV, to avoid index-errors if input_ids is not None and self.config.image_token_id >= self.vocab_size: special_image_mask = input_ids == self.config.image_token_id llm_input_ids = input_ids.clone() llm_input_ids[special_image_mask] = 0 else: llm_input_ids = input_ids if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(llm_input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # Merge text and images if pixel_values is not None: image_features = self.get_image_features(pixel_values) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) # It may already have been prepared by e.g. 
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config.get_text_config(),
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            if token_type_ids is not None and inputs_embeds.shape[1] != 1:
                # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
                # First find where a new image block starts: 1 if image and previous not image
                # The images cannot attend to future images, but can attend to all prev images and to itself bidirectionally
                is_image = (token_type_ids == 1).to(cache_position.device)
                new_image_start = is_image & ~nn.functional.pad(is_image, (1, 0), value=0)[:, :-1]
                image_group_ids = torch.cumsum(new_image_start.int(), dim=1) - 1
                image_group_ids = torch.where(
                    is_image, image_group_ids, torch.full_like(token_type_ids, -1, device=is_image.device)
                )

                mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
                    token_type_ids.to(cache_position.device), image_group_ids, self.config.mm_tokens_per_image
                )

            # Create the masks
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        outputs = self.language_model(
            attention_mask=causal_mask_mapping,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )

        return Gemma3ModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values if use_cache else None,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )


@auto_docstring(
    custom_intro="""
    The Gemma3 model for conditional generation, which consists of a vision backbone, a language model and a language
    modeling head.
    """
)
class Gemma3ForConditionalGeneration(Gemma3PreTrainedModel, GenerationMixin):
    _checkpoint_conversion_mapping = {
        "^language_model.model": "model.language_model",
        "^vision_tower": "model.vision_tower",
        "^multi_modal_projector": "model.multi_modal_projector",
        "^language_model.lm_head": "lm_head",
    }
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: Gemma3Config):
        super().__init__(config)
        self.model = Gemma3Model(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    def get_image_features(self, pixel_values):
        return self.model.get_image_features(pixel_values)

    # Make modules available through conditional class for BC
    @property
    def language_model(self):
        return self.model.language_model

    @property
    def vision_tower(self):
        return self.model.vision_tower

    @property
    def multi_modal_projector(self):
        return self.model.multi_modal_projector

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[list[torch.FloatTensor],
Cache]] = None, token_type_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **lm_kwargs, ) -> Union[tuple, Gemma3CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it") >>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it") >>> messages = [ ... { ... "role": "system", ... "content": [ ... {"type": "text", "text": "You are a helpful assistant."} ... ] ... }, ... { ... "role": "user", "content": [ ... {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"}, ... {"type": "text", "text": "Where is the cat standing?"}, ... ] ... }, ... ] >>> inputs = processor.apply_chat_template( ... messages, ... tokenize=True, ... return_dict=True, ... return_tensors="pt", ... add_generation_prompt=True ... ) >>> # Generate >>> generate_ids = model.generate(**inputs) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. It appears to" ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **lm_kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] if attention_mask is not None: # we use the input attention mask to shift the logits and labels, because it is 2D. 
# we also crop attn mask in case it is longer, which happens in PrefixTuning with peft shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device) shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous() shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous() else: shift_logits = shift_logits.contiguous() shift_labels = shift_labels.contiguous() # Flatten the tokens loss_fct = nn.CrossEntropyLoss() flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size) flat_labels = shift_labels.view(-1).to(shift_logits.device) loss = loss_fct(flat_logits, flat_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Gemma3CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, attention_mask=None, token_type_ids=None, use_cache=True, logits_to_keep=None, labels=None, **kwargs, ): # Overwritten -- custom `position_ids` and `pixel_values` handling model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, cache_position=cache_position, use_cache=use_cache, logits_to_keep=logits_to_keep, token_type_ids=token_type_ids, **kwargs, ) # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model. 
NOTE: use_cache=False needs pixel_values always if cache_position[0] == 0: model_inputs["pixel_values"] = pixel_values return model_inputs @staticmethod def create_masks_for_generate( config: PretrainedConfig, input_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor], cache_position: torch.Tensor, past_key_values: Optional[Cache], position_ids: Optional[torch.Tensor], token_type_ids: Optional[torch.Tensor] = None, **kwargs, ) -> dict: # Prepare mask arguments mask_kwargs = { "config": config.get_text_config(), "input_embeds": input_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # Add the token type ids mask for generate as well if token_type_ids is not None and input_embeds.shape[1] != 1: # We need to pass an additional mask function to account for token type ids, and it needs to be an `or` # First find where a new image block starts: 1 if image and previous not image # The images cannot attend to future images, but can attend to all prev images and to itself bidirectionally is_image = (token_type_ids == 1).to(cache_position.device) new_image_start = is_image & ~nn.functional.pad(is_image, (1, 0), value=0)[:, :-1] image_group_ids = torch.cumsum(new_image_start.int(), dim=1) - 1 image_group_ids = torch.where(is_image, image_group_ids, torch.full_like(token_type_ids, -1)) mask_kwargs["or_mask_function"] = token_type_ids_mask_function( token_type_ids.to(cache_position.device), image_group_ids, config.mm_tokens_per_image ) return create_masks_for_generate(**mask_kwargs) class Gemma3ForSequenceClassification(Gemma3PreTrainedModel): _checkpoint_conversion_mapping = { "^language_model.model": "model.language_model", "^vision_tower": "model.vision_tower", "^multi_modal_projector": "model.multi_modal_projector", } def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Gemma3Model(config) self.score = nn.Linear(config.text_config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> SequenceClassifierOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" transformer_outputs = self.model( input_ids, attention_mask=attention_mask, pixel_values=pixel_values, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, token_type_ids=token_type_ids, use_cache=use_cache, **kwargs, ) hidden_states = transformer_outputs.last_hidden_state logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.text_config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.text_config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.text_config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config) return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "Gemma3PreTrainedModel", "Gemma3TextModel", "Gemma3ForCausalLM", "Gemma3ForConditionalGeneration", "Gemma3Model", "Gemma3ForSequenceClassification", ]
transformers/src/transformers/models/gemma3/modeling_gemma3.py/0
{ "file_path": "transformers/src/transformers/models/gemma3/modeling_gemma3.py", "repo_id": "transformers", "token_count": 24990 }
487
# coding=utf-8 # Copyright 2024 The GLM & ZhipuAI team and HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig class GlmConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GlmModel`]. It is used to instantiate an Glm model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Glm-4-9b-chat. e.g. [THUDM/glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151552): Vocabulary size of the Glm model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GlmModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 13696): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 2): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. partial_rotary_factor (`float`, *optional*, defaults to 0.5): The factor of the partial rotary position. head_dim (`int`, *optional*, defaults to 128): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The legacy activation function. It is overwritten by the `hidden_activation`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 131072): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1.5625e-07): The epsilon used by the rms normalization layers. 
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        pad_token_id (`int`, *optional*, defaults to 151329):
            Padding token id.
        eos_token_id (`int` | `list`, *optional*, defaults to `[151329, 151336, 151338]`):
            End of stream token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.

    ```python
    >>> from transformers import GlmModel, GlmConfig

    >>> # Initializing a Glm glm-4-9b-chat style configuration
    >>> configuration = GlmConfig()

    >>> # Initializing a model from the glm-4-9b-chat style configuration
    >>> model = GlmModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_up_proj": "colwise_rep",  # we need to replicate here due to the `chunk` operation
        "layers.*.mlp.down_proj": "rowwise_rep",  # we need to replicate here due to the `chunk` operation
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=151552,
        hidden_size=4096,
        intermediate_size=13696,
        num_hidden_layers=40,
        num_attention_heads=32,
        num_key_value_heads=2,
        partial_rotary_factor=0.5,
        head_dim=128,
        hidden_act="silu",
        attention_dropout=0.0,
        max_position_embeddings=131072,
        initializer_range=0.02,
        rms_norm_eps=0.00000015625,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        pad_token_id=151329,
        eos_token_id=[151329, 151336, 151338],
        bos_token_id=None,
        attention_bias=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.partial_rotary_factor = partial_rotary_factor
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["GlmConfig"]
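# Quick illustrative check of the attention geometry the defaults above imply (it
# assumes a `transformers` version that ships `GlmConfig`): 32 query heads share 2
# key/value heads via grouped-query attention, and the partial rotary factor applies
# RoPE to only half of each 128-dim head.
from transformers import GlmConfig

config = GlmConfig()
queries_per_kv_head = config.num_attention_heads // config.num_key_value_heads
rotary_dim = int(config.head_dim * config.partial_rotary_factor)
print(queries_per_kv_head, rotary_dim)  # 16 query heads per KV head, RoPE over 64 dims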
transformers/src/transformers/models/glm/configuration_glm.py/0
{ "file_path": "transformers/src/transformers/models/glm/configuration_glm.py", "repo_id": "transformers", "token_count": 2977 }
488
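The `num_key_value_heads` docstring above describes converting a multi-head checkpoint to a GQA checkpoint by mean-pooling the key/value heads within each group. Below is a minimal sketch of that pooling, not taken from the repository; the tensor shapes are assumptions chosen to match the Glm defaults (32 attention heads, head_dim 128, 2 KV heads):

```python
import torch

# Hypothetical shapes matching the GlmConfig defaults above.
num_attention_heads = 32
num_key_value_heads = 2
head_dim = 128
hidden_size = 4096

# k_proj weight of an MHA checkpoint: (num_attention_heads * head_dim, hidden_size).
k_proj_weight = torch.randn(num_attention_heads * head_dim, hidden_size)

# Split the 32 original heads into 2 groups of 16 and mean-pool within each group.
group_size = num_attention_heads // num_key_value_heads
grouped = k_proj_weight.view(num_key_value_heads, group_size, head_dim, hidden_size)
gqa_k_proj_weight = grouped.mean(dim=1).reshape(num_key_value_heads * head_dim, hidden_size)

print(gqa_k_proj_weight.shape)  # torch.Size([256, 4096])
```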
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from typing import Optional from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpt2 import GPT2Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} class GPT2TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without a preceding space) or not: ```python >>> from transformers import GPT2TokenizerFast >>> tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") >>> tokenizer("Hello world")["input_ids"] [15496, 995] >>> tokenizer(" Hello world")["input_ids"] [18435, 995] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. merges_file (`str`, *optional*): Path to the merges file. tokenizer_file (`str`, *optional*): Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word. (The GPT2 tokenizer detects the beginning of words by the preceding space.)
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = GPT2Tokenizer def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ): super().__init__( vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, ) self.add_bos_token = kwargs.pop("add_bos_token", False) def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) __all__ = ["GPT2TokenizerFast"]
transformers/src/transformers/models/gpt2/tokenization_gpt2_fast.py/0
{ "file_path": "transformers/src/transformers/models/gpt2/tokenization_gpt2_fast.py", "repo_id": "transformers", "token_count": 1978 }
489
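The `<Tip>` in the class docstring above says that pretokenized input requires `add_prefix_space=True`, and the `_batch_encode_plus`/`_encode_plus` overrides enforce it. A short usage sketch (assumes the hub checkpoint can be downloaded or is cached locally):

```python
from transformers import GPT2TokenizerFast

# add_prefix_space=True lets each pretokenized word receive a leading space,
# which is how GPT-2's byte-level BPE marks word boundaries.
tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2", add_prefix_space=True)

encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])  # [18435, 995], the ids of " Hello" and " world" (cf. the docstring example)

# Without add_prefix_space=True, the same call would trip the assertion
# in _batch_encode_plus above.
```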
# coding=utf-8 # Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GPTNeoX Japanese model configuration""" from ...configuration_utils import PretrainedConfig from ...modeling_rope_utils import rope_config_validation from ...utils import logging logger = logging.get_logger(__name__) class GPTNeoXJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTNeoXJapaneseModel`]. It is used to instantiate a GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTNeoXJapanese [abeja/gpt-neox-japanese-2.7b](https://huggingface.co/abeja/gpt-neox-japanese-2.7b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. The default configuration corresponds to the 2.7B model. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the GPTNeoXJapanese model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTNeoXJapaneseModel`]. hidden_size (`int`, *optional*, defaults to 2560): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. intermediate_multiple_size (`int`, *optional*, defaults to 4): The dimension of the "intermediate" layer in the Transformer encoder is calculated as hidden_size * intermediate_multiple_size. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. rotary_pct (`float`, *optional*, defaults to 1.00): Percentage of hidden dimensions to allocate to rotary embeddings. rotary_emb_base (`int`, *optional*, defaults to 10000): Base for computing rotary embeddings frequency. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend updating this value accordingly.
Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to the value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2. `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (> `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2. `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE. `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the hidden layer.
Example: ```python >>> from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseModel >>> # Initializing a GPTNeoXJapanese gpt-neox-japanese-2.7b style configuration >>> configuration = GPTNeoXJapaneseConfig() >>> # Initializing a model (with random weights) from the gpt-neox-japanese-2.7b style configuration >>> model = GPTNeoXJapaneseModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "gpt_neox_japanese" def __init__( self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, rope_scaling=None, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_multiple_size = intermediate_multiple_size self.hidden_act = hidden_act self.rotary_pct = rotary_pct self.partial_rotary_factor = rotary_pct self.rotary_emb_base = rotary_emb_base self.rope_theta = rotary_emb_base self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rope_scaling = rope_scaling self.attention_dropout = attention_dropout self.hidden_dropout = hidden_dropout # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) __all__ = ["GPTNeoXJapaneseConfig"]
transformers/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py/0
{ "file_path": "transformers/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py", "repo_id": "transformers", "token_count": 3564 }
490
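Since `rope_scaling` is a plain dictionary validated by `rope_config_validation`, a brief, hedged sketch of passing it follows; the factor and sequence lengths are illustrative values, not recommendations:

```python
from transformers import GPTNeoXJapaneseConfig

# Linear RoPE scaling: per the rope_scaling docs above, a factor of 2.0 is meant
# to let the model handle sequences of 2 * the original pre-trained length, so
# max_position_embeddings is raised from the 2048 default accordingly.
config = GPTNeoXJapaneseConfig(
    max_position_embeddings=4096,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)
print(config.rope_scaling)  # {'rope_type': 'linear', 'factor': 2.0}
```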
# coding=utf-8 # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 GroupViT model.""" from __future__ import annotations import collections.abc import math from dataclasses import dataclass from typing import Any import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_tensorflow_probability_available, logging, replace_return_docstrings, ) from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) # soft dependency if is_tensorflow_probability_available(): try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow _ = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: logger.error( "GroupViT models are not usable since `tensorflow_probability` can't be loaded. " "It seems you have `tensorflow_probability` installed with the wrong tensorflow version." "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." ) else: try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow _ = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: pass _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" LARGE_NEGATIVE = -1e8 # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: int | None = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean( keras.metrics.sparse_categorical_crossentropy( y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True ) ) # Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->groupvit def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(tf.transpose(similarity)) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor: y_soft = stable_softmax(logits, dim) # Straight through. index = tf.argmax(y_soft, dim) y_hard = tf.one_hot( index, depth=shape_list(logits)[dim], # TensorFlow expects axis to be -1 or between [0, 3). But received: -2 # This is why the following code snippet is used. axis=range(len(shape_list(logits)))[dim], dtype=y_soft.dtype, ) ret = y_hard - tf.stop_gradient(y_soft) + y_soft return ret def gumbel_softmax(logits: tf.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> tf.Tensor: gumbel_dist = tfp.distributions.Gumbel(0.0, 1.0) gumbels = gumbel_dist.sample(tf.shape(logits), dtype=logits.dtype) gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) y_soft = stable_softmax(gumbels, dim) if hard: # Straight through. index = tf.argmax(y_soft, dim) y_hard = tf.one_hot( index, depth=shape_list(logits)[dim], # TensorFlow expects axis to be -1 or between [0, 3). But received: -2 # This is why the following code snippet is used. axis=range(len(shape_list(logits)))[dim], dtype=y_soft.dtype, ) ret = y_hard - tf.stop_gradient(y_soft) + y_soft else: # Reparametrization trick. ret = y_soft return ret def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool = False) -> tf.Tensor: """ Args: attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. 
Returns: `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width] """ scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = shape_list(attentions)[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = shape_list(attentions)[2] // feat_height batch_size = shape_list(attentions)[0] groups = shape_list(attentions)[1] # number of group token # [batch_size, groups, height x width, groups] -> [batch_size, groups, height, width] attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width)) attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) if align_corners: attentions = tf.compat.v1.image.resize( attentions, size=(height, width), method="bilinear", align_corners=align_corners, ) else: attentions = tf.image.resize(attentions, size=(height, width), method="bilinear") attentions = tf.transpose(attentions, perm=(0, 3, 1, 2)) return attentions def get_grouping_from_attentions(attentions: tuple[tf.Tensor], hw_shape: tuple[int]) -> tf.Tensor: """ Args: attentions (`tuple(tf.Tensor)`: tuple of attention maps returned by `TFGroupViTVisionTransformer` hw_shape (`tuple(int)`): height and width of the output attention map Returns: `tf.Tensor`: the attention map of shape [batch_size, groups, height, width] """ attn_maps = [] prev_attn_masks = None for attn_masks in attentions: # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups] attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1)) if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks) # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height x width] -> [batch_size, num_groups, height, width] cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape) attn_maps.append(cur_attn_map) # [batch_size, num_groups, height, width] final_grouping = attn_maps[-1] return tf.stop_gradient(final_grouping) @dataclass class TFGroupViTModelOutput(ModelOutput): """ Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. segmentation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> text_embeds (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`TFGroupViTTextModel`]. image_embeds (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`TFGroupViTVisionModel`]. 
text_model_output (`TFBaseModelOutputWithPooling`): The output of the [`TFGroupViTTextModel`]. vision_model_output (`TFBaseModelOutputWithPooling`): The output of the [`TFGroupViTVisionModel`]. """ loss: tf.Tensor | None = None logits_per_image: tf.Tensor | None = None logits_per_text: tf.Tensor | None = None segmentation_logits: tf.Tensor | None = None text_embeds: tf.Tensor | None = None image_embeds: tf.Tensor | None = None text_model_output: TFBaseModelOutputWithPooling = None vision_model_output: TFBaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class TFGroupViTCrossAttentionLayer(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.attn = TFGroupViTAttention(config, name="attn") self.norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm2") self.mlp = TFGroupViTMLP(config, name="mlp") self.norm_post = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post") self.config = config def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False) -> tf.Tensor: x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "norm2", None) is not None: with tf.name_scope(self.norm2.name): self.norm2.build([None, None, self.config.hidden_size]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "norm_post", None) is not None: with tf.name_scope(self.norm_post.name): self.norm_post.build([None, None, self.config.hidden_size]) class TFGroupViTAssignAttention(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.scale = config.hidden_size**-0.5 self.q_proj = keras.layers.Dense(config.hidden_size, name="q_proj") self.k_proj = keras.layers.Dense(config.hidden_size, name="k_proj") self.v_proj = keras.layers.Dense(config.hidden_size, name="v_proj") self.proj = keras.layers.Dense(config.hidden_size, name="proj") self.assign_eps = config.assign_eps self.config = config def get_attn(self, attn: tf.Tensor, gumbel: bool = True, hard: bool = True, training: bool = False) -> tf.Tensor: if gumbel and training: attn = gumbel_softmax(attn, dim=-2, hard=hard) else: if hard: attn = hard_softmax(attn, dim=-2) else: attn = stable_softmax(attn, axis=-2) return attn def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False): value = key # [batch_size, query_length, channels] query = self.q_proj(query) # [batch_size, key_length, channels] key = self.k_proj(key) # [batch_size, key_length, channels] value = self.v_proj(value) # [batch_size, query_length, key_length] raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale attn = self.get_attn(raw_attn, training=training) soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False) attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps) out = tf.matmul(attn, value) out = self.proj(out) return out, soft_attn def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_proj", None) is not None: with 
tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.config.hidden_size]) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.config.hidden_size]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.config.hidden_size]) if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, self.config.hidden_size]) class TFGroupViTTokenAssign(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs): super().__init__(**kwargs) self.num_output_group = num_output_group # norm on group_tokens self.norm_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_tokens") assign_mlp_ratio = ( config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) ) tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name="mlp_inter") self.norm_post_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post_tokens") # norm on x self.norm_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_x") self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name="pre_assign_attn") self.assign = TFGroupViTAssignAttention(config, name="assign") self.norm_new_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_new_x") self.mlp_channels = TFGroupViTMLP( config, config.hidden_size, channels_dim, config.hidden_size, name="mlp_channels" ) self.config = config def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor: """ Args: group_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels] """ # [B, num_output_groups, C] <- [B, num_group_tokens, C] projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool = False): """ Args: image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels] group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels] """ group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) # [batch_size, num_output_groups, channels] projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return new_image_tokens, attention def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "norm_tokens", None) is not None: with tf.name_scope(self.norm_tokens.name): self.norm_tokens.build([None, None, self.config.hidden_size]) if getattr(self, "mlp_inter", None) is not None: with tf.name_scope(self.mlp_inter.name): self.mlp_inter.build(None) if getattr(self, "norm_post_tokens", None) is not None: with tf.name_scope(self.norm_post_tokens.name): 
self.norm_post_tokens.build([None, None, self.config.hidden_size]) if getattr(self, "norm_x", None) is not None: with tf.name_scope(self.norm_x.name): self.norm_x.build([None, None, self.config.hidden_size]) if getattr(self, "pre_assign_attn", None) is not None: with tf.name_scope(self.pre_assign_attn.name): self.pre_assign_attn.build(None) if getattr(self, "assign", None) is not None: with tf.name_scope(self.assign.name): self.assign.build(None) if getattr(self, "norm_new_x", None) is not None: with tf.name_scope(self.norm_new_x.name): self.norm_new_x.build([None, None, self.config.hidden_size]) if getattr(self, "mlp_channels", None) is not None: with tf.name_scope(self.mlp_channels.name): self.mlp_channels.build(None) # Adapted from transformers.models.vit.modeling_tf_vit.TFViTPatchEmbeddings with ViT->GroupViT class TFGroupViTPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels = config.num_channels # hidden_size is a member as it will be required in the call method self.hidden_size = config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.num_channels = num_channels self.config = config self.projection = keras.layers.Conv2D( filters=self.hidden_size, kernel_size=patch_size, strides=patch_size, padding="valid", data_format="channels_last", use_bias=True, kernel_initializer=get_initializer(self.config.initializer_range), bias_initializer="zeros", name="projection", ) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if ( not interpolate_pos_encoding and tf.executing_eagerly() and (height != self.image_size[0] or width != self.image_size[1]) ): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) projection = self.projection(pixel_values) # Change the 2D spatial dimensions to a single temporal dimension. 
# shape = (batch_size, num_patches, out_channels=embed_dim) num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0]) # In the TFGroupViTVisionEmbeddings the embeddings from this layer will be layer normalized # LayerNormalization layer needs to have static last dimension (otherwise the test_keras_save_load fails with symbolic tensors) # This is why we have used the hidden_size in the reshape method embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) # Adapted from transformers.models.vit.modeling_tf_vit.TFViTEmbeddings class TFGroupViTVisionEmbeddings(keras.layers.Layer): """ Construct the position and patch embeddings. """ def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name="patch_embeddings") self.dropout = keras.layers.Dropout(rate=config.dropout, name="dropout") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.config = config def build(self, input_shape=None): num_patches = self.patch_embeddings.num_patches self.position_embeddings = self.add_weight( shape=(1, num_patches, self.config.hidden_size), initializer="zeros", trainable=True, name="position_embeddings", ) if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.config.hidden_size]) def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor: """ This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution images.
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ batch_size, num_patches, dim = shape_list(embeddings) num_positions = shape_list(self.position_embeddings)[1] if num_patches == num_positions and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings h0 = height // self.config.patch_size w0 = width // self.config.patch_size patch_pos_embed = tf.image.resize( images=tf.reshape( patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) ), size=(h0, w0), method="bicubic", ) patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim)) return patch_pos_embed def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: _, _, height, width = shape_list(pixel_values) embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->GroupViT class TFGroupViTTextEmbeddings(keras.layers.Layer): def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.config = config def build(self, input_shape: tf.TensorShape = None): with tf.name_scope("token_embedding"): self.weight = self.add_weight( shape=(self.config.vocab_size, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name="weight", ) with tf.name_scope("position_embedding"): self.position_embedding = self.add_weight( shape=(self.config.max_position_embeddings, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name="embeddings", ) super().build(input_shape) def call( self, input_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. 
""" if input_ids is None and inputs_embeds is None: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) final_embeddings = inputs_embeds + position_embeds return final_embeddings class TFGroupViTStage(keras.layers.Layer): """This corresponds to the `GroupingLayer` class in the GroupViT implementation.""" def __init__( self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int, **kwargs, ): super().__init__(**kwargs) self.config = config self.depth = depth self.num_group_token = num_group_token self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(depth)] if num_group_token > 0: self.downsample = TFGroupViTTokenAssign( config=config, num_group_token=num_group_token, num_output_group=num_output_group, name="downsample", ) else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = [ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="group_projector.0"), TFGroupViTMixerMLP( config, num_prev_group_token, config.hidden_size // 2, num_group_token, name="group_projector.1" ), ] else: self.group_projector = None def build(self, input_shape=None): if self.num_group_token > 0: self.group_token = self.add_weight( shape=(1, self.num_group_token, self.config.hidden_size), initializer="zeros", trainable=True, name="group_token", ) else: self.group_token = None if self.built: return self.built = True if getattr(self, "downsample", None) is not None: with tf.name_scope(self.downsample.name): self.downsample.build(None) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "group_projector", None) is not None: with tf.name_scope(self.group_projector[0].name): self.group_projector[0].build([None, None, self.config.hidden_size]) with tf.name_scope(self.group_projector[1].name): self.group_projector[1].build(None) @property def with_group_token(self): return self.group_token is not None def split_x(self, x: tf.Tensor) -> tf.Tensor: if self.with_group_token: return x[:, : -self.num_group_token], x[:, -self.num_group_token :] else: return x, None def concat_x(self, x: tf.Tensor, group_token: tf.Tensor | None = None) -> tf.Tensor: if group_token is None: return x return tf.concat([x, group_token], axis=1) def call( self, hidden_states: tf.Tensor, prev_group_token: tf.Tensor | None = None, output_attentions: bool = False, training: bool = False, ) -> tuple[tf.Tensor]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the grouping tensors of Grouping block. 
""" if self.with_group_token: group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1)) if self.group_projector is not None: for layer in self.group_projector: prev_group_token = layer(prev_group_token) group_token = group_token + prev_group_token else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer( cat_x, attention_mask=None, causal_attention_mask=None, output_attentions=None, ) cat_x = layer_out[0] x, group_token = self.split_x(cat_x) attention = None if self.downsample is not None: x, attention = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class TFGroupViTMLP(keras.layers.Layer): def __init__( self, config: GroupViTVisionConfig, hidden_size: int | None = None, intermediate_size: int | None = None, output_size: int | None = None, **kwargs, ): super().__init__(**kwargs) self.config = config self.activation_fn = get_tf_activation(config.hidden_act) hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = keras.layers.Dense(intermediate_size, name="fc1") self.fc2 = keras.layers.Dense(output_size, name="fc2") self.intermediate_size = intermediate_size self.hidden_size = hidden_size def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.hidden_size]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.intermediate_size]) class TFGroupViTMixerMLP(TFGroupViTMLP): def call(self, x, training: bool = False): x = super().call(hidden_states=tf.transpose(x, perm=(0, 2, 1))) return tf.transpose(x, perm=(0, 2, 1)) # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPAttention class TFGroupViTAttention(keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.attention_head_size = self.embed_dim // self.num_attention_heads if self.attention_head_size * self.num_attention_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_attention_heads})." 
) factor = config.initializer_factor in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor out_proj_std = (self.embed_dim**-0.5) * factor self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.q_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj" ) self.k_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj" ) self.v_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj" ) self.dropout = keras.layers.Dropout(rate=config.attention_dropout) self.out_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj" ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, causal_attention_mask: tf.Tensor | None = None, output_attentions: bool | None = None, encoder_hidden_states: tf.Tensor | None = None, training: bool = False, ) -> tuple[tf.Tensor]: """Input shape: Batch x Time x Channel""" batch_size = shape_list(hidden_states)[0] is_cross_attention = encoder_hidden_states is not None mixed_query_layer = self.q_proj(inputs=hidden_states) if is_cross_attention: mixed_key_layer = self.k_proj(inputs=encoder_hidden_states) mixed_value_layer = self.v_proj(inputs=encoder_hidden_states) else: mixed_key_layer = self.k_proj(inputs=hidden_states) mixed_value_layer = self.v_proj(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) # apply the causal_attention_mask first if causal_attention_mask is not None: # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function) attention_scores = tf.add(attention_scores, causal_attention_mask) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. _attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(inputs=_attention_probs) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, embed_dim) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim)) attention_output = self.out_proj(attention_output) # In TFBert, attention weights are returned after dropout. # However, in CLIP, they are returned before dropout. outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPEncoderLayer with CLIP->GroupViT class TFGroupViTEncoderLayer(keras.layers.Layer): def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFGroupViTAttention(config, name="self_attn") self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.mlp = TFGroupViTMLP(config, name="mlp") self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. causal_attention_mask (`tf.Tensor`): causal attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`): Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(inputs=hidden_states) attention_outputs = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(inputs=hidden_states) hidden_states = self.mlp(hidden_states=hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) # Adapted from transformers.models.clip.modeling_tf_clip.TFGroupViTTextEncoder class TFGroupViTTextEncoder(keras.layers.Layer): def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> tuple | TFBaseModelOutput: encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFGroupViTVisionEncoder(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None: super().__init__(**kwargs) self.stages = [ TFGroupViTStage( config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0, name=f"stages_._{i}", ) for i in range(len(config.depths)) ] def call( self, hidden_states: tf.Tensor, output_hidden_states: bool, output_attentions: bool, return_dict: bool, training: bool = False, ) -> tuple | TFBaseModelOutput: all_hidden_states = () if output_hidden_states else None all_groupings = () if output_attentions else None group_tokens = None for stage in self.stages: if output_hidden_states: all_hidden_states = all_hidden_states 
+ (hidden_states,) layer_outputs = stage(hidden_states, group_tokens, output_attentions) hidden_states = layer_outputs[0] group_tokens = layer_outputs[1] if output_attentions and layer_outputs[2] is not None: all_groupings = all_groupings + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "stages", None) is not None: for layer in self.stages: with tf.name_scope(layer.name): layer.build(None) # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder class TFGroupViTTextTransformer(keras.layers.Layer): def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFGroupViTTextEmbeddings(config, name="embeddings") self.encoder = TFGroupViTTextEncoder(config, name="encoder") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") # For `pooled_output` computation self.eos_token_id = config.eos_token_id self.embed_dim = config.hidden_size def call( self, input_ids: TFModelInputType, attention_mask: tf.Tensor, position_ids: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: input_shape = shape_list(input_ids) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids) batch_size, seq_length = input_shape # CLIP's text model uses causal mask, prepare it here. # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype) # check attention mask and invert # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask) encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.final_layer_norm(inputs=sequence_output) if self.eos_token_id == 2: # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
# A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added # ------------------------------------------------------------ # text_embeds.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) pooled_output = tf.gather_nd( params=sequence_output, indices=tf.stack( values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1 ), ) else: # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible) pooled_output = tf.gather_nd( params=sequence_output, indices=tf.stack( values=( tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1), ), axis=1, ), ) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32): # It is possible with an unspecified sequence length for seq_length to be # a runtime value, which is unsupported by tf.constant. Per the TensorFlow # docs, tf.fill can handle runtime dynamic shapes: # https://www.tensorflow.org/api_docs/python/tf/fill diag = tf.cast(tf.fill((seq_length,), 0.0), dtype) # set an additive 2D attention mask with all places being masked to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype) # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked) # TIP: think the 2D matrix as the space of (query_seq, key_seq) to_mask = tf.linalg.band_part(to_mask, 0, -1) # to_mask = tf.linalg.band_part(to_mask, -1, 0) to_mask = tf.linalg.set_diag(to_mask, diagonal=diag) return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPVisionTransformer class TFGroupViTVisionTransformer(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFGroupViTVisionEmbeddings(config, name="embeddings") self.encoder = TFGroupViTVisionEncoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.embed_dim = config.hidden_size def call( self, pixel_values: TFModelInputType, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> tuple | TFBaseModelOutputWithPooling: embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( hidden_states=embedding_output, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # normalize the last hidden state last_hidden_state = self.layernorm(last_hidden_state) pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1) if not
return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.embed_dim]) @keras_serializable # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextMainLayer with CLIP->GroupViT class TFGroupViTTextMainLayer(keras.layers.Layer): config_class = GroupViTTextConfig def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.config = config self.text_model = TFGroupViTTextTransformer(config, name="text_model") def get_input_embeddings(self) -> keras.layers.Layer: return self.text_model.embeddings def set_input_embeddings(self, value: tf.Variable): self.text_model.embeddings.weight = value self.text_model.embeddings.vocab_size = shape_list(value)[0] @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_model_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return text_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "text_model", None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) @keras_serializable # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPVisionMainLayer with CLIP->GroupViT class TFGroupViTVisionMainLayer(keras.layers.Layer): config_class = GroupViTVisionConfig def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.vision_model = TFGroupViTVisionTransformer(config, name="vision_model") def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: if pixel_values is None: raise ValueError("You have to specify pixel_values") vision_model_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return vision_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vision_model", None) is not None: with 
tf.name_scope(self.vision_model.name): self.vision_model.build(None) @keras_serializable # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPMainLayer class TFGroupViTMainLayer(keras.layers.Layer): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) if not isinstance(config.text_config, GroupViTTextConfig): raise TypeError( "config.text_config is expected to be of type GroupViTTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, GroupViTVisionConfig): raise TypeError( "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" f" {type(config.vision_config)}." ) self.config = config text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.projection_intermediate_dim = config.projection_intermediate_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = TFGroupViTTextTransformer(text_config, name="text_model") self.vision_model = TFGroupViTVisionTransformer(vision_config, name="vision_model") self.visual_projection = [ keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"), keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.9, epsilon=1e-5), keras.layers.ReLU(name="visual_projection.2"), keras.layers.Dense(self.projection_dim, name="visual_projection.3"), ] self.text_projection = [ keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"), keras.layers.BatchNormalization(name="text_projection.1", momentum=0.9, epsilon=1e-5), keras.layers.ReLU(name="text_projection.2"), keras.layers.Dense(self.projection_dim, name="text_projection.3"), ] def build(self, input_shape=None): self.logit_scale = self.add_weight( shape=(1,), initializer=keras.initializers.Constant(self.config.logit_scale_init_value), trainable=True, name="logit_scale", ) if self.built: return self.built = True if getattr(self, "text_model", None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "visual_projection", None) is not None: with tf.name_scope(self.visual_projection[0].name): self.visual_projection[0].build([None, None, None, self.vision_embed_dim]) with tf.name_scope(self.visual_projection[1].name): self.visual_projection[1].build((None, self.projection_intermediate_dim)) with tf.name_scope(self.visual_projection[3].name): self.visual_projection[3].build([None, None, None, self.projection_intermediate_dim]) if getattr(self, "text_projection", None) is not None: with tf.name_scope(self.text_projection[0].name): self.text_projection[0].build([None, None, None, self.text_embed_dim]) with tf.name_scope(self.text_projection[1].name): self.text_projection[1].build((None, self.projection_intermediate_dim)) with tf.name_scope(self.text_projection[3].name): self.text_projection[3].build([None, None, None, self.projection_intermediate_dim]) @unpack_inputs def get_text_features( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: if input_ids is None: raise ValueError("You have 
to specify either input_ids") input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = text_outputs[1] for layer in self.text_projection: pooled_output = layer(pooled_output) text_features = pooled_output return text_features @unpack_inputs def get_image_features( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: if pixel_values is None: raise ValueError("You have to specify pixel_values") vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = vision_outputs[1] for layer in self.visual_projection: pooled_output = layer(pooled_output) image_features = pooled_output return image_features @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, pixel_values: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, return_loss: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_segmentation: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFGroupViTModelOutput | tuple[tf.Tensor]: if input_ids is None: raise ValueError("You have to specify either input_ids") if pixel_values is None: raise ValueError("You have to specify pixel_values") input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if output_segmentation: output_attentions = True vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[1] for layer in self.visual_projection: image_embeds = layer(image_embeds) text_embeds = text_outputs[1] for layer in self.text_projection: text_embeds = layer(text_embeds) # normalized features image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True) # cosine similarity as logits logit_scale = tf.math.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) seg_logits = None if output_segmentation: # grouped features # [batch_size_image, num_group, hidden_size] image_group_embeds = vision_outputs[0] # [batch_size_image*num_group, hidden_size] image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1])) for layer in self.visual_projection: image_group_embeds = layer(image_group_embeds) if output_hidden_states: attentions = vision_outputs[3] else: attentions = vision_outputs[2] # [batch_size_image, num_group, height, width] grouping = 
get_grouping_from_attentions(attentions, pixel_values.shape[2:]) # normalized features image_group_embeds = image_group_embeds / tf.norm( tensor=image_group_embeds, ord="euclidean", axis=-1, keepdims=True ) # [batch_size_image x num_group, batch_size_text] logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale # [batch_size_image, batch_size_text, num_group] logits_per_image_group = tf.reshape( logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0]) ) logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1)) # [batch_size_image, batch_size_text, height x width] flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1)) # [batch_size_image, batch_size_text, height, width] seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale seg_logits = tf.reshape( seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]) ) loss = None if return_loss: loss = groupvit_loss(logits_per_text)[None, ...] if not return_dict: if seg_logits is not None: output = ( logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs, ) else: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return TFGroupViTModelOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) class TFGroupViTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GroupViTConfig base_model_prefix = "groupvit" GROUPVIT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` </Tip> Args: config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]`, `dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and
        each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""

GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]`, `dict[str, tf.Tensor]` or `dict[str, np.ndarray]`
        and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
""" GROUPVIT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` ``dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` `dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" class TFGroupViTTextModel(TFGroupViTPreTrainedModel): config_class = GroupViTTextConfig main_input_name = "input_ids" def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.groupvit = TFGroupViTTextMainLayer(config, name="groupvit") @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: r""" Returns: Examples: ```python >>> from transformers import CLIPTokenizer, TFGroupViTTextModel >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = TFGroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" outputs = self.groupvit( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "groupvit", None) is not None: with tf.name_scope(self.groupvit.name): self.groupvit.build(None) class TFGroupViTVisionModel(TFGroupViTPreTrainedModel): config_class = GroupViTVisionConfig main_input_name = "pixel_values" def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.groupvit = TFGroupViTVisionMainLayer(config, name="groupvit") @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def call( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFGroupViTVisionModel >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = TFGroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" outputs = self.groupvit( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "groupvit", None) is not None: with 
tf.name_scope(self.groupvit.name): self.groupvit.build(None) @add_start_docstrings(GROUPVIT_START_DOCSTRING) class TFGroupViTModel(TFGroupViTPreTrainedModel): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.groupvit = TFGroupViTMainLayer(config, name="groupvit") @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def get_text_features( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: r""" Returns: text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`TFGroupViTTextModel`]. Examples: ```python >>> from transformers import CLIPTokenizer, TFGroupViTModel >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") >>> text_features = model.get_text_features(**inputs) ```""" text_features = self.groupvit.get_text_features( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return text_features @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: r""" Returns: image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`TFGroupViTVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFGroupViTModel >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="tf") >>> image_features = model.get_image_features(**inputs) ```""" image_features = self.groupvit.get_image_features( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return image_features @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig) def call( self, input_ids: TFModelInputType | None = None, pixel_values: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, return_loss: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_segmentation: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFGroupViTModelOutput | tuple[tf.Tensor]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFGroupViTModel >>> import tensorflow as tf >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = tf.math.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities ```""" outputs = self.groupvit( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_segmentation=output_segmentation, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput: # TODO: As is this currently fails with saved_model=True, because # TensorFlow cannot trace through nested dataclasses. Reference: # https://github.com/huggingface/transformers/pull/16886 return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "groupvit", None) is not None: with tf.name_scope(self.groupvit.name): self.groupvit.build(None) __all__ = ["TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel"]
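# ---------------------------------------------------------------------------
# Editor's note: a standalone usage sketch, not part of the original file.
# It exercises the `output_segmentation=True` branch implemented in
# `TFGroupViTMainLayer.call` above for zero-shot segmentation. The checkpoint
# and image URL follow the docstring examples; the argmax over the text axis
# is an assumed post-processing step, not library-provided API.

import requests
import tensorflow as tf
from PIL import Image

from transformers import AutoProcessor, TFGroupViTModel

model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
)
outputs = model(**inputs, output_segmentation=True)

# `segmentation_logits` has shape [batch_size_image, batch_size_text, height, width]
# (see the `seg_logits` computation above); argmax over the text axis assigns
# each pixel to one of the prompts.
segmentation = tf.math.argmax(outputs.segmentation_logits, axis=1)
# ---------------------------------------------------------------------------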
# End of file: transformers/src/transformers/models/groupvit/modeling_tf_groupvit.py
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import torch from accelerate import init_empty_weights from transformers import ( AutoConfig, AutoModelForCausalLM, AutoTokenizer, Idefics2Config, Idefics2ForConditionalGeneration, Idefics2ImageProcessor, Idefics2Processor, MistralConfig, ) EPILOG_TXT = """Example: python transformers/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py --original_model_id HuggingFaceM4/idefics2-8b --output_hub_path org/idefics2 """ KEYS_TO_MODIFY_MAPPING = { "lm_head.weight": "lm_head.linear.weight", "model.layers": "model.text_model.layers", "model.norm": "model.text_model.norm", "model.perceiver_resampler": "model.connector.perceiver_resampler", "model.modality_projection": "model.connector.modality_projection", } WEIGHTS_TO_MERGE_MAPPING = ( # (weights to merge in merging order), (new weight name) ( ("model.embed_tokens.weight", "model.embed_tokens.additional_embedding.weight"), "model.text_model.embed_tokens.weight", ), (("lm_head.linear.weight", "additional_fc.weight"), "lm_head.weight"), ) def convert_state_dict_to_hf(state_dict): new_state_dict = {} for key, value in state_dict.items(): if key.endswith(".inv_freq"): continue for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) new_state_dict[key] = value return new_state_dict def merge_weights(state_dict): new_state_dict = copy.deepcopy(state_dict) # Merge the weights for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING: for weight in weights_to_merge: assert weight in state_dict, f"Weight {weight} is missing in the state dict" if new_weight_name not in new_state_dict: new_state_dict[new_weight_name] = [state_dict[weight]] else: new_state_dict[new_weight_name].append(state_dict[weight]) new_state_dict[new_weight_name] = torch.cat(new_state_dict[new_weight_name], dim=0) # Remove the weights that were merged for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING: for weight in weights_to_merge: if weight in new_state_dict and weight != new_weight_name: new_state_dict.pop(weight) return new_state_dict def get_config(checkpoint): if checkpoint == "HuggingFaceM4/idefics2": # We load the config then recreate to use the text_config config = AutoConfig.from_pretrained(checkpoint) text_config = MistralConfig( vocab_size=config.vocab_size + config.additional_vocab_size, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, num_hidden_layers=config.num_hidden_layers, num_attention_heads=config.num_attention_heads, num_key_value_heads=config.num_key_value_heads, hidden_act=config.hidden_act, max_position_embeddings=config.max_position_embeddings, initializer_range=config.initializer_range, rms_norm_eps=config.rms_norm_eps, tie_word_embeddings=config.tie_word_embeddings, rope_theta=config.rope_theta, sliding_window=config.sliding_window, attention_dropout=config.attention_dropout, 
pad_token_id=config.pad_token_id, bos_token_id=config.bos_token_id, eos_token_id=config.eos_token_id, ) perceiver_config = config.perceiver_config.to_dict() config = Idefics2Config( text_config=text_config.to_dict(), vision_config=config.vision_config, perceiver_config=perceiver_config, use_cache=config.use_cache, image_token_id=config.image_token_id, tie_word_embeddings=config.tie_word_embeddings, ) return config return AutoConfig.from_pretrained(checkpoint) def convert_idefics2_hub_to_hf(original_model_id, output_hub_path, push_to_hub): # The original model maps to AutoModelForCausalLM, converted we map to Idefics2ForConditionalGeneration original_model = AutoModelForCausalLM.from_pretrained(original_model_id, trust_remote_code=True) # The original model doesn't use the idefics2 processing objects image_seq_len = original_model.config.perceiver_config.resampler_n_latents image_processor = Idefics2ImageProcessor() tokenizer = AutoTokenizer.from_pretrained(original_model_id) processor = Idefics2Processor( image_processor=image_processor, tokenizer=tokenizer, image_seq_len=image_seq_len, ) state_dict = original_model.state_dict() state_dict = convert_state_dict_to_hf(state_dict) # Merge weights state_dict = merge_weights(state_dict) config = get_config(original_model_id) with init_empty_weights(): model = Idefics2ForConditionalGeneration(config) model.load_state_dict(state_dict, strict=True, assign=True) model.save_pretrained(output_hub_path) processor.save_pretrained(output_hub_path) if push_to_hub: model.push_to_hub(output_hub_path, private=True) processor.push_to_hub(output_hub_path, private=True) def main(): parser = argparse.ArgumentParser( epilog=EPILOG_TXT, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( "--original_model_id", help="Hub location of the text model", ) parser.add_argument( "--output_hub_path", help="Location on the hub of the converted model", ) parser.add_argument( "--push_to_hub", action="store_true", help="If set, the model will be pushed to the hub after conversion.", ) args = parser.parse_args() convert_idefics2_hub_to_hf(args.original_model_id, args.output_hub_path, args.push_to_hub) if __name__ == "__main__": main()
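# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original script.
# It runs a toy state dict through `convert_state_dict_to_hf` above to show
# the key renaming; the tensor shapes are made up and only the key handling
# matters here.

def _demo_key_renaming():
    dummy = {
        "model.layers.0.self_attn.q_proj.weight": torch.zeros(4, 4),
        "model.layers.0.self_attn.rotary_emb.inv_freq": torch.zeros(2),  # dropped
        "lm_head.weight": torch.zeros(8, 4),
    }
    renamed = convert_state_dict_to_hf(dummy)
    # `.inv_freq` buffers are skipped, `model.layers` moves under
    # `model.text_model.layers`, and `lm_head.weight` becomes
    # `lm_head.linear.weight` (later merged back by `merge_weights`).
    print(sorted(renamed))
# ---------------------------------------------------------------------------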
# End of file: transformers/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py
from typing import Optional, Union import torch import torch.nn as nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.models.ijepa.configuration_ijepa import IJepaConfig from ...modeling_outputs import ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, torch_int from ..vit.modeling_vit import ViTEmbeddings, ViTForImageClassification, ViTModel class IJepaEmbeddings(ViTEmbeddings): def __init__(self, config: IJepaConfig, use_mask_token: bool = False) -> None: super().__init__(config, use_mask_token) # Remove cls_token from IJepaEmbeddings, as it is not used in the model del self.cls_token num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] num_positions = self.position_embeddings.shape[1] # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings @auto_docstring class IJepaPreTrainedModel(PreTrainedModel): config: IJepaConfig base_model_prefix = "ijepa" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["IJepaEmbeddings", "IJepaLayer"] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> 
None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, IJepaEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype) if module.mask_token is not None: module.mask_token.data.zero_() class IJepaModel(IJepaPreTrainedModel, ViTModel): def __init__(self, config: IJepaConfig, add_pooling_layer: bool = False, use_mask_token: bool = False): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. """ super().__init__(config) self.config = config self.embeddings = IJepaEmbeddings(config, use_mask_token=use_mask_token) @auto_docstring( custom_intro=""" IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. </Tip> """ ) class IJepaForImageClassification(IJepaPreTrainedModel, ViTForImageClassification): def __init__(self, config: IJepaConfig): super().__init__(config) self.ijepa = IJepaModel(config, add_pooling_layer=False) self.post_init() def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ijepa( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output.mean(dim=1)) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "IJepaPreTrainedModel", "IJepaModel", "IJepaForImageClassification", ]
# End of file: transformers/src/transformers/models/ijepa/modular_ijepa.py
# coding=utf-8 # Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Jamba model configuration""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class JambaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`JambaModel`]. It is used to instantiate a Jamba model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Jamba-v0.1 model. [ai21labs/Jamba-v0.1](https://huggingface.co/ai21labs/Jamba-v0.1) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65536): Vocabulary size of the Jamba model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`JambaModel`] tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the model has a output word embedding layer. hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. 
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1): Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the logits of the last prompt token are needed for generation. For long sequences, the logits for the entire sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint significantly. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary loss. See [here]() for more details router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. sliding_window (`int`, *optional*): Sliding window attention window size. If not specified, will default to `None`. max_position_embeddings (`int`, *optional*, defaults to 262144): This value doesn't have any real effect. The maximum sequence length that this model is intended to be used with. It can be used with longer sequences, but performance may degrade. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. num_experts_per_tok (`int`, *optional*, defaults to 2): The number of experts to root per-token, can be also interpreted as the `top-p` routing parameter num_experts (`int`, *optional*, defaults to 16): Number of experts per Sparse MLP layer. expert_layer_period (`int`, *optional*, defaults to 2): Once in this many layers, we will have an expert layer expert_layer_offset (`int`, *optional*, defaults to 1): The first layer index that contains an expert mlp layer attn_layer_period (`int`, *optional*, defaults to 8): Once in this many layers, we will have a vanilla attention layer attn_layer_offset (`int`, *optional*, defaults to 4): The first layer index that contains a vanilla attention mlp layer use_mamba_kernels (`bool`, *optional*, defaults to `True`): Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and `causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if `True` and kernels are not available mamba_d_state (`int`, *optional*, defaults to 16): The dimension the mamba state space latents mamba_d_conv (`int`, *optional*, defaults to 4): The size of the mamba convolution kernel mamba_expand (`int`, *optional*, defaults to 2): Expanding factor (relative to hidden_size) used to determine the mamba intermediate size mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`): Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` mamba_conv_bias (`bool`, *optional*, defaults to `True`): Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block. 
mamba_proj_bias (`bool`, *optional*, defaults to `False`): Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block """ model_type = "jamba" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=65536, tie_word_embeddings=False, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, num_logits_to_keep=1, output_router_logits=False, router_aux_loss_coef=0.001, pad_token_id=0, bos_token_id=1, eos_token_id=2, sliding_window=None, max_position_embeddings=262144, attention_dropout=0.0, num_experts_per_tok=2, num_experts=16, expert_layer_period=2, expert_layer_offset=1, attn_layer_period=8, attn_layer_offset=4, use_mamba_kernels=True, mamba_d_state=16, mamba_d_conv=4, mamba_expand=2, mamba_dt_rank="auto", mamba_conv_bias=True, mamba_proj_bias=False, **kwargs, ): self.vocab_size = vocab_size self.tie_word_embeddings = tie_word_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.max_position_embeddings = max_position_embeddings self.attention_dropout = attention_dropout # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.num_logits_to_keep = num_logits_to_keep self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.expert_layer_period = expert_layer_period self.expert_layer_offset = expert_layer_offset self.attn_layer_period = attn_layer_period self.attn_layer_offset = attn_layer_offset self._check_supported_offset("attention", self.attn_layer_period, self.attn_layer_offset) self._check_supported_offset("expert", self.expert_layer_period, self.expert_layer_offset) self.use_mamba_kernels = use_mamba_kernels self.mamba_d_state = mamba_d_state self.mamba_d_conv = mamba_d_conv self.mamba_expand = mamba_expand self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == "auto" else mamba_dt_rank self.mamba_conv_bias = mamba_conv_bias self.mamba_proj_bias = mamba_proj_bias super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) @property def layers_block_type(self): return [ "attention" if i % self.attn_layer_period == self.attn_layer_offset else "mamba" for i in range(self.num_hidden_layers) ] @property def layers_num_experts(self): return [ self.num_experts if i % self.expert_layer_period == self.expert_layer_offset else 1 for i in range(self.num_hidden_layers) ] def _check_supported_offset(self, property_: str, period: int, offset: int): if offset >= period: raise ValueError( f"{property_} layer offset ({offset}) must be smaller than {property_} layer period ({period})" ) __all__ = ["JambaConfig"]
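# ---------------------------------------------------------------------------
# Editor's note: a small sketch, not part of the original file, showing how
# the period/offset arguments translate into per-layer schedules through the
# `layers_block_type` and `layers_num_experts` properties above.

from transformers import JambaConfig

config = JambaConfig(num_hidden_layers=8)
# Attention every `attn_layer_period=8` layers, starting at offset 4:
print(config.layers_block_type)
# ['mamba', 'mamba', 'mamba', 'mamba', 'attention', 'mamba', 'mamba', 'mamba']
# Experts every `expert_layer_period=2` layers, starting at offset 1:
print(config.layers_num_experts)
# [1, 16, 1, 16, 1, 16, 1, 16]
# ---------------------------------------------------------------------------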
# End of file: transformers/src/transformers/models/jamba/configuration_jamba.py
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch KOSMOS-2 model.""" import math from dataclasses import dataclass from typing import Any, Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, CausalLMOutputWithCrossAttentions, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int from ...utils.deprecation import deprecate_kwarg from .configuration_kosmos2 import Kosmos2Config, Kosmos2TextConfig, Kosmos2VisionConfig logger = logging.get_logger(__name__) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. 
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx @dataclass @auto_docstring( custom_intro=""" Base class for text model's outputs that also contains a pooling of the last hidden states. """ ) class Kosmos2ModelOutput(ModelOutput): r""" past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. projection_attentions (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute the weighted average in the self-attention heads. vision_model_output (`BaseModelOutputWithPooling`, *optional*): The output of the [`Kosmos2VisionModel`]. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_embeds: Optional[torch.FloatTensor] = None projection_attentions: Optional[tuple[torch.FloatTensor]] = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) @dataclass @auto_docstring( custom_intro=""" Model output class for `Kosmos2ForConditionalGeneration`. """ ) class Kosmos2ForConditionalGenerationModelOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
    image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
    projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.

        Attention weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
        the weighted average in the self-attention heads.
    vision_model_output (`BaseModelOutputWithPooling`, *optional*):
        The output of the [`Kosmos2VisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_embeds: Optional[torch.FloatTensor] = None
    projection_attentions: Optional[tuple[torch.FloatTensor]] = None
    vision_model_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Kosmos2
class Kosmos2VisionEmbeddings(nn.Module):
    def __init__(self, config: Kosmos2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows interpolating the pre-trained position encodings so that the model can be used on
        higher-resolution images. It is also adapted to support torch.jit tracing.
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." ) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Adapted from transformers.models.siglip.modeling_siglip.eager_attention_forward -> Kosmos2 doesn't cast attn weights to fp32 def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Kosmos2VisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.is_causal = False self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, seq_length, embed_dim = hidden_states.shape queries = self.q_proj(hidden_states) keys = self.k_proj(hidden_states) values = self.v_proj(hidden_states) queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) # CLIP text model uses both `causal_attention_mask` and `attention_mask` # in case FA2 kernel is called, `is_causal` should be inferred from `causal_attention_mask` if self.config._attn_implementation != "flash_attention_2": if attention_mask is not None and causal_attention_mask is not None: attention_mask = attention_mask + causal_attention_mask elif causal_attention_mask is not None: attention_mask = causal_attention_mask else: self.is_causal = causal_attention_mask is not None attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Kosmos2Vision
class Kosmos2VisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer with AltCLIP->Kosmos2Vision
class Kosmos2VisionEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Kosmos2VisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = Kosmos2VisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Kosmos2VisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            causal_attention_mask (`torch.FloatTensor`): causal attention mask of size `(batch, 1, tgt_len, src_len)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->Kosmos2Vision
class Kosmos2VisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Kosmos2VisionEncoderLayer`].
Args: config: Kosmos2VisionConfig """ def __init__(self, config: Kosmos2VisionConfig): super().__init__() self.config = config self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Similar to `transformers.models.clip.modeling_clip.CLIPVisionTransformer` but without docstring for `forward` class Kosmos2VisionTransformer(nn.Module): # Copied from transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer.__init__ with AltCLIPVision->Kosmos2Vision,ALTCLIP_VISION->KOSMOS2_VISION,AltCLIP->Kosmos2Vision def __init__(self, config: Kosmos2VisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = Kosmos2VisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = Kosmos2VisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Similar to `transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding` but allowing to pass `position_ids` class Kosmos2TextSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): 
super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer("weights", emb_weights, persistent=False) @staticmethod # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward( self, input_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None, ): if input_ids is not None: bsz, seq_len = input_ids.size() if position_ids is None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: bsz, seq_len = inputs_embeds.size()[:-1] if position_ids is None: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length class KosmosTextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" # Similar to transformers.models.bart.modeling_bart.BartAttention.__init__ except an additional `inner_attn_ln`. 
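    # `inner_attn_ln` (an extra LayerNorm applied to the attention output before `out_proj`) is what
    # distinguishes this module from `BartAttention`. `Kosmos2TextBlock` enables it for self-attention
    # only; cross-attention and `Kosmos2ImageToTextProjection` construct this module with
    # `add_inner_attn_layernorm=False`.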
    def __init__(
        self,
        config,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: Optional[bool] = False,
        add_inner_attn_layernorm: Optional[bool] = False,
        bias: Optional[bool] = True,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.config = config
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )

        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.layer_idx = layer_idx

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        # End copy

        self.inner_attn_ln = None
        if add_inner_attn_layernorm:
            self.inner_attn_ln = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = encoder_hidden_states is not None
        batch_size, seq_length = hidden_states.shape[:2]

        query_states = self.q_proj(hidden_states)
        query_states = query_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_value = past_key_values.cross_attention_cache
                else:
                    curr_past_key_value = past_key_values.self_attention_cache
            else:
                curr_past_key_value = past_key_values

        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_value.layers[self.layer_idx].keys
            value_states = curr_past_key_value.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
            value_states = value_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)

            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention:
                    past_key_values.is_updated[self.layer_idx] = True

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and
output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous() if self.inner_attn_ln is not None: attn_output = self.inner_attn_ln(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class Kosmos2TextFFN(nn.Module): def __init__(self, config: Kosmos2TextConfig): super().__init__() self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim) self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim) self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.ffn_layernorm(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class Kosmos2TextBlock(GradientCheckpointingLayer): def __init__(self, config: Kosmos2TextConfig, layer_idx=None): super().__init__() self.embed_dim = config.embed_dim self.self_attn = KosmosTextAttention( config, embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, add_inner_attn_layernorm=True, layer_idx=layer_idx, ) self.dropout = config.dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) if config.add_cross_attention: self.encoder_attn = KosmosTextAttention( config, embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, add_inner_attn_layernorm=False, layer_idx=layer_idx, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.ffn = Kosmos2TextFFN(config) self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) 
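        # Pre-LN residual branch: dropout is applied to the attention output before it is added back
        # to the `residual` saved above.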
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: if not hasattr(self, "encoder_attn"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) # FFN hidden_states = self.ffn(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class Kosmos2TextTransformer(nn.Module): """ Transformer decoder consisting of `config.layers` layers. Each layer is a [`Kosmos2TextBlock`]. Args: config: Kosmos2TextConfig """ def __init__(self, config: Kosmos2TextConfig): super().__init__() self.config = config self.dropout = config.dropout self.layerdrop = config.layerdrop self.embed_scale = math.sqrt(config.embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.embed_dim, padding_idx=config.pad_token_id) self.embed_positions = Kosmos2TextSinusoidalPositionalEmbedding( num_positions=config.max_position_embeddings, embedding_dim=config.embed_dim, padding_idx=config.pad_token_id, ) self.layers = nn.ModuleList([Kosmos2TextBlock(config, layer_idx=i) for i in range(config.layers)]) self.layer_norm = nn.LayerNorm(config.embed_dim, config.layer_norm_eps) self.gradient_checkpointing = False def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward_embedding( self, input_ids, inputs_embeds: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, img_input_mask: Optional[torch.Tensor] = None, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None, ): # The argument `inputs_embeds` should be the one without being multiplied by `self.embed_scale`. 
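        # Token embeddings are looked up first; image features (if any) are then scattered into the
        # positions flagged by `img_input_mask`, and only afterwards is the result multiplied by
        # `embed_scale` and summed with the sinusoidal position embeddings.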
if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if image_embeds is not None: inputs_embeds[img_input_mask.to(dtype=torch.bool)] = image_embeds.to(inputs_embeds.device).view( -1, image_embeds.size(-1) ) inputs_embeds = inputs_embeds * self.embed_scale # embed positions positions = self.embed_positions( input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, position_ids=position_ids, ) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(), DynamicCache()) if encoder_hidden_states is not None else DynamicCache() ) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 # We don't need img info. 
when `past_key_values_length` > 0
        if past_key_values_length > 0:
            image_embeds = None
            image_embeds_position_mask = None

        hidden_states = self.forward_embedding(
            input_ids=input_ids,
            inputs_embeds=inputs_embeds,
            image_embeds=image_embeds,
            img_input_mask=image_embeds_position_mask,
            past_key_values_length=past_key_values_length,
            position_ids=position_ids,
        )

        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, hidden_states, past_key_values_length
        )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])

        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != (len(self.layers)):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add final layer norm
        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@auto_docstring
class Kosmos2PreTrainedModel(PreTrainedModel):
    config: Kosmos2Config
    supports_gradient_checkpointing = True
    _no_split_modules = ["Kosmos2VisionEncoderLayer", "Kosmos2TextBlock"]
    _supports_attention_backend = True
    _supports_flash_attn = True
    _supports_sdpa = True

    def _init_weights(self, module: nn.Module):
        """Initialize the weights"""
        if isinstance(self, Kosmos2VisionModel):
            factor = self.config.initializer_factor
        elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)):
            factor = self.config.vision_config.initializer_factor

        if isinstance(self, (Kosmos2TextModel, Kosmos2TextForCausalLM)):
            std = self.config.init_std
        elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)):
            std = self.config.text_config.init_std

        if
isinstance(module, Kosmos2VisionEmbeddings): nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, Kosmos2VisionAttention): in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, Kosmos2VisionMLP): in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, KosmosTextAttention): nn.init.normal_(module.q_proj.weight, std=std) nn.init.normal_(module.k_proj.weight, std=std) nn.init.normal_(module.v_proj.weight, std=std) nn.init.normal_(module.out_proj.weight, std=std) elif isinstance(module, Kosmos2TextFFN): nn.init.normal_(module.fc1.weight, std=std) nn.init.normal_(module.fc2.weight, std=std) elif isinstance(module, Kosmos2TextForCausalLM): nn.init.normal_(module.lm_head.weight, std=std) elif isinstance(module, Kosmos2ImageToTextProjection): nn.init.normal_(module.dense.weight, std=std) nn.init.normal_(module.latent_query) elif isinstance(module, Kosmos2TextTransformer): module.embed_tokens.weight.data.normal_(mean=0.0, std=std) if module.embed_tokens.padding_idx is not None: module.embed_tokens.weight.data[module.embed_tokens.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class Kosmos2VisionModel(Kosmos2PreTrainedModel): config: Kosmos2VisionConfig main_input_name = "pixel_values" # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model def __init__(self, config: Kosmos2VisionConfig): super().__init__(config) self.model = Kosmos2VisionTransformer(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.get_input_embeddings with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model def get_input_embeddings(self) -> nn.Module: return self.model.embeddings.patch_embedding @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: return self.model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) class Kosmos2TextModel(Kosmos2PreTrainedModel): config: Kosmos2TextConfig def __init__(self, config: Kosmos2TextConfig): super().__init__(config) self.model = Kosmos2TextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> 
nn.Module: return self.model.embed_tokens @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0, 1]`: - 1 for places where to put the image features, - 0 for places that are not for image features (i.e. for text tokens). cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ return self.model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) @auto_docstring( custom_intro=""" The text model from KOSMOS-2 with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""" ) class Kosmos2TextForCausalLM(Kosmos2PreTrainedModel, GenerationMixin): config: Kosmos2TextConfig _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: Kosmos2TextConfig): super().__init__(config) self.model = Kosmos2TextTransformer(config) self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.model.embed_tokens def get_output_embeddings(self) -> nn.Module: return self.lm_head @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0, 1]`: - 1 for places where to put the image features, - 0 for places that are not for image features (i.e. for text tokens). cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) lm_logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss = self.loss_function(logits=lm_logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, image_embeds=None, image_embeds_position_mask=None, past_key_values=None, attention_mask=None, inputs_embeds=None, use_cache=None, cache_position=None, **model_kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore if cache_position[0] != 0: image_embeds = None image_embeds_position_mask = None # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation) elif image_embeds_position_mask is not None: batch_size, seq_len = inputs_embeds.size()[:-1] if inputs_embeds is not None else input_ids.size() mask_len = image_embeds_position_mask.size()[-1] image_embeds_position_mask = torch.cat( ( image_embeds_position_mask, torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device), ), dim=1, ) model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **model_kwargs, ) # Kosmos2 has offset for position ids, so we need to create them correctly in PositionEmbedding layer model_inputs.pop("position_ids", None) return model_inputs class Kosmos2ImageToTextProjection(nn.Module): """The layer that transforms the image model's output to part of the text model's input (namely, image features)""" def __init__(self, config: Kosmos2Config): super().__init__() self.dense = nn.Linear(config.vision_config.hidden_size, config.text_config.embed_dim) self.latent_query = nn.Parameter(torch.randn(config.latent_query_num, config.text_config.embed_dim)) self.x_attn = KosmosTextAttention( config.text_config, config.text_config.embed_dim, config.text_config.attention_heads, dropout=config.text_config.attention_dropout, is_decoder=False, add_inner_attn_layernorm=False, ) def forward(self, features): 
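        # The projection works as a latent-query resampler: a learned set of `latent_query_num`
        # queries cross-attends over the projected image features (concatenated with the queries
        # themselves), compressing the vision sequence into a fixed number of soft tokens for the
        # text model.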
hidden_states = self.dense(features) # shape = [batch, latent_query_num, h_dim] latent_query = self.latent_query.unsqueeze(0).expand(hidden_states.size(0), -1, -1) key_value_states = torch.cat([hidden_states, latent_query], dim=1) hidden_states, attn_weights = self.x_attn( hidden_states=latent_query, encoder_hidden_states=key_value_states, past_key_values=None, attention_mask=None, output_attentions=None, ) return hidden_states, attn_weights @auto_docstring( custom_intro=""" KOSMOS-2 Model for generating text and image features. The model consists of a vision encoder and a language model. """ ) class Kosmos2Model(Kosmos2PreTrainedModel): config: Kosmos2Config main_input_name = "pixel_values" def __init__(self, config: Kosmos2Config): super().__init__(config) self.text_model = Kosmos2TextModel(config.text_config) self.vision_model = Kosmos2VisionModel(config.vision_config) self.image_to_text_projection = Kosmos2ImageToTextProjection(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.model.embed_tokens def set_input_embeddings(self, value): self.text_model.model.embed_tokens = value def get_image_features( self, pixel_values: torch.FloatTensor, return_attentions: Optional[bool] = False, interpolate_pos_encoding: Optional[bool] = False, ): """ Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. return_attentions (`bool`, *optional*, defaults to `False`): Whether to return `projection_attentions` or not. interpolate_pos_encoding (`bool`, *optional*, defaults to `False`): Whether to interpolate positional embeddings or not. """ vision_model_output = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) # normalized features image_embeds = nn.functional.normalize(image_embeds, dim=-1) image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) if return_attentions: return image_embeds, projection_attentions return image_embeds @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, input_ids: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, image_embeds: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, Kosmos2ModelOutput]: r""" image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0, 1]`: - 1 for places where to put the image features, - 0 for places that are not for image features (i.e. for text tokens). 
image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Kosmos2Model >>> model = Kosmos2Model.from_pretrained("microsoft/kosmos-2-patch14-224") >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = ( ... "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863>" ... "</object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911>" ... "</object>" ... ) >>> inputs = processor(text=text, images=image, return_tensors="pt", add_eos_token=True) >>> last_hidden_state = model( ... pixel_values=inputs["pixel_values"], ... input_ids=inputs["input_ids"], ... attention_mask=inputs["attention_mask"], ... image_embeds_position_mask=inputs["image_embeds_position_mask"], ... ).last_hidden_state >>> list(last_hidden_state.shape) [1, 91, 2048] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_model_output = None projection_attentions = None if image_embeds is None: if pixel_values is None: raise ValueError("You have to specify either `pixel_values` or `image_embeds`.") image_embeds, projection_attentions = self.get_image_features( pixel_values, return_attentions=True, interpolate_pos_encoding=interpolate_pos_encoding ) outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, **kwargs, ) return Kosmos2ModelOutput( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_embeds=image_embeds, projection_attentions=projection_attentions, vision_model_output=vision_model_output, ) @auto_docstring( custom_intro=""" KOSMOS-2 Model for generating text and bounding boxes given an image. The model consists of a vision encoder and a language model. 
""" ) class Kosmos2ForConditionalGeneration(Kosmos2PreTrainedModel, GenerationMixin): config: Kosmos2Config main_input_name = "pixel_values" _tied_weights_keys = ["text_model.lm_head.weight"] def __init__(self, config: Kosmos2Config): super().__init__(config) self.text_model = Kosmos2TextForCausalLM(config.text_config) self.vision_model = Kosmos2VisionModel(config.vision_config) self.image_to_text_projection = Kosmos2ImageToTextProjection(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.model.embed_tokens def set_input_embeddings(self, value): self.text_model.model.embed_tokens = value def get_output_embeddings(self) -> nn.Module: return self.text_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.text_model.set_output_embeddings(new_embeddings) @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, input_ids: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, image_embeds: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Kosmos2ForConditionalGenerationModelOutput]: r""" image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0, 1]`: - 1 for places where to put the image features, - 0 for places that are not for image features (i.e. for text tokens). image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*): Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Kosmos2ForConditionalGeneration >>> model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224") >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> prompt = "<grounding> An image of" >>> inputs = processor(text=prompt, images=image, return_tensors="pt") >>> generated_ids = model.generate( ... pixel_values=inputs["pixel_values"], ... input_ids=inputs["input_ids"], ... attention_mask=inputs["attention_mask"], ... image_embeds=None, ... image_embeds_position_mask=inputs["image_embeds_position_mask"], ... use_cache=True, ... max_new_tokens=64, ... 
) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False) >>> processed_text '<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>.' >>> caption, entities = processor.post_process_generation(generated_text) >>> caption 'An image of a snowman warming himself by a fire.' >>> entities [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) vision_model_output = None projection_attentions = None if image_embeds is None: if pixel_values is None: raise ValueError("You have to specify either `pixel_values` or `image_embeds`.") vision_model_output = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) # normalized features image_embeds = nn.functional.normalize(image_embeds, dim=-1) image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) lm_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, **kwargs, ) return Kosmos2ForConditionalGenerationModelOutput( loss=lm_outputs.loss, logits=lm_outputs.logits, past_key_values=lm_outputs.past_key_values, hidden_states=lm_outputs.hidden_states, attentions=lm_outputs.attentions, image_embeds=image_embeds, projection_attentions=projection_attentions, vision_model_output=vision_model_output, ) @torch.no_grad() def generate( self, pixel_values: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs, ): # in order to allow `inputs` argument (as in `GenerationMixin`) inputs = kwargs.pop("inputs", None) if pixel_values is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs} were passed alongside `pixel_values` which is not allowed." f"Make sure to either pass `inputs` or pixel_values=..." ) if pixel_values is None and inputs is not None: pixel_values = inputs if image_embeds is None: vision_model_output = self.vision_model(pixel_values) # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. 
image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) # normalized features image_embeds = nn.functional.normalize(image_embeds, dim=-1) image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) output = self.text_model.generate( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, inputs_embeds=inputs_embeds, **kwargs, ) return output __all__ = ["Kosmos2ForConditionalGeneration", "Kosmos2Model", "Kosmos2PreTrainedModel"]
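Because `generate` above accepts precomputed `image_embeds` and only runs the vision encoder when they are missing, an image can be encoded once and reused across several prompts. The following is a minimal sketch of that pattern, an editor's addition rather than an upstream example: the checkpoint and processor calls mirror the docstrings above, while the encode-once loop and the second prompt are illustrative.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, Kosmos2ForConditionalGeneration

model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")

url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
image = Image.open(requests.get(url, stream=True).raw)

prompts = ["<grounding> An image of", "<grounding> Question: What is next to the snowman? Answer:"]

# Encode the image once, mirroring the `image_embeds is None` branch of `generate`:
# vision encoder -> post_layernorm -> L2 normalization -> image-to-text projection.
with torch.no_grad():
    pixel_values = processor(text=prompts[0], images=image, return_tensors="pt")["pixel_values"]
    vision_output = model.vision_model(pixel_values)
    image_embeds = model.vision_model.model.post_layernorm(vision_output[0])
    image_embeds = torch.nn.functional.normalize(image_embeds, dim=-1)
    image_embeds, _ = model.image_to_text_projection(image_embeds)

for prompt in prompts:
    inputs = processor(text=prompt, images=image, return_tensors="pt")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image_embeds=image_embeds,  # vision encoder is skipped inside `generate`
        image_embeds_position_mask=inputs["image_embeds_position_mask"],
        max_new_tokens=64,
    )
    print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```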
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LayoutLMv3 model configuration""" from collections import OrderedDict from collections.abc import Mapping from typing import TYPE_CHECKING, Any, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType logger = logging.get_logger(__name__) class LayoutLMv3Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLMv3 [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LayoutLMv3Model`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. max_2d_position_embeddings (`int`, *optional*, defaults to 1024): The maximum value that the 2D position embedding might ever be used with. Typically set this to something large just in case (e.g., 1024). coordinate_size (`int`, *optional*, defaults to `128`): Dimension of the coordinate embeddings. shape_size (`int`, *optional*, defaults to `128`): Dimension of the width and height embeddings. has_relative_attention_bias (`bool`, *optional*, defaults to `True`): Whether or not to use a relative attention bias in the self-attention mechanism. rel_pos_bins (`int`, *optional*, defaults to 32): The number of relative position bins to be used in the self-attention mechanism. max_rel_pos (`int`, *optional*, defaults to 128): The maximum number of relative positions to be used in the self-attention mechanism. max_rel_2d_pos (`int`, *optional*, defaults to 256): The maximum number of relative 2D positions in the self-attention mechanism. rel_2d_pos_bins (`int`, *optional*, defaults to 64): The number of 2D relative position bins in the self-attention mechanism. has_spatial_attention_bias (`bool`, *optional*, defaults to `True`): Whether or not to use a spatial attention bias in the self-attention mechanism. visual_embed (`bool`, *optional*, defaults to `True`): Whether or not to add patch embeddings. input_size (`int`, *optional*, defaults to `224`): The size (resolution) of the images. num_channels (`int`, *optional*, defaults to `3`): The number of channels of the images. patch_size (`int`, *optional*, defaults to `16`) The size (resolution) of the patches. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. Example: ```python >>> from transformers import LayoutLMv3Config, LayoutLMv3Model >>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration >>> configuration = LayoutLMv3Config() >>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration >>> model = LayoutLMv3Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "layoutlmv3" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs, ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) self.max_2d_position_embeddings = max_2d_position_embeddings self.coordinate_size = coordinate_size 
self.shape_size = shape_size self.has_relative_attention_bias = has_relative_attention_bias self.rel_pos_bins = rel_pos_bins self.max_rel_pos = max_rel_pos self.has_spatial_attention_bias = has_spatial_attention_bias self.rel_2d_pos_bins = rel_2d_pos_bins self.max_rel_2d_pos = max_rel_2d_pos self.text_embed = text_embed self.visual_embed = visual_embed self.input_size = input_size self.num_channels = num_channels self.patch_size = patch_size self.classifier_dropout = classifier_dropout class LayoutLMv3OnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.12") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 @property def default_onnx_opset(self) -> int: return 12 def generate_dummy_inputs( self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]: """ Generate inputs to provide to the ONNX exporter for the specific framework Args: processor ([`ProcessorMixin`]): The processor associated with this model configuration. batch_size (`int`, *optional*, defaults to -1): The batch size to export the model for (-1 means dynamic axis). seq_length (`int`, *optional*, defaults to -1): The sequence length to export the model for (-1 means dynamic axis). is_pair (`bool`, *optional*, defaults to `False`): Indicate if the input is a pair (sentence 1, sentence 2). framework (`TensorType`, *optional*, defaults to `None`): The framework (PyTorch or TensorFlow) that the processor will generate tensors for. num_channels (`int`, *optional*, defaults to 3): The number of channels of the generated images. image_width (`int`, *optional*, defaults to 40): The width of the generated images. image_height (`int`, *optional*, defaults to 40): The height of the generated images. 
Returns: Mapping[str, Any]: holding the kwargs to provide to the model's forward function """ # A dummy image is used so OCR should not be applied setattr(processor.image_processor, "apply_ocr", False) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size # Generate dummy bounding boxes dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) inputs = dict( processor( dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework, ) ) return inputs __all__ = ["LayoutLMv3Config", "LayoutLMv3OnnxConfig"]
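To make the export path above concrete, here is a minimal sketch (an editor's addition, not an upstream example) that instantiates `LayoutLMv3OnnxConfig` and inspects the dummy inputs it builds; the processor checkpoint and the shape printing are illustrative assumptions.

```python
from transformers import LayoutLMv3Config, LayoutLMv3Processor
from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig
from transformers.utils import TensorType

config = LayoutLMv3Config()
onnx_config = LayoutLMv3OnnxConfig(config, task="sequence-classification")

# Dynamic axes declared by the `inputs` property above.
print(dict(onnx_config.inputs))

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")

# batch_size=-1 / seq_length=-1 request dynamic axes; `generate_dummy_inputs`
# then falls back to fixed dimensions (a batch of 2, a sequence of 8 including
# special tokens) so ONNX tracing cannot constant-fold them. It also disables
# OCR on the image processor itself.
dummy_inputs = onnx_config.generate_dummy_inputs(
    processor, batch_size=-1, seq_length=-1, framework=TensorType.PYTORCH
)
print({name: tuple(tensor.shape) for name, tensor in dummy_inputs.items()})
```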
# coding=utf-8 # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 LED model.""" from __future__ import annotations import random from dataclasses import dataclass import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions # Public API from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_led import LEDConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "allenai/led-base-16384" _CONFIG_FOR_DOC = "LEDConfig" LARGE_NEGATIVE = -1e8 # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill( (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: int | None = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFLEDLearnedPositionalEmbedding(keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): super().__init__(num_embeddings, embedding_dim, **kwargs) def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0): """Input is expected to be of size [bsz x seqlen].""" seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name="range") position_ids += past_key_values_length return super().call(tf.cast(position_ids, dtype=tf.int32)) # Copied from transformers.models.longformer.modeling_tf_longformer.TFLongformerSelfAttention with TFLongformer->TFLEDEncoder class TFLEDEncoderSelfAttention(keras.layers.Layer): def __init__(self, config, layer_id, **kwargs): super().__init__(**kwargs) self.config = config if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_heads = config.num_attention_heads self.head_dim = int(config.hidden_size / config.num_attention_heads) self.embed_dim = config.hidden_size self.query = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="value", ) # separate projection layers for tokens with global attention self.query_global = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="query_global", ) self.key_global = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="key_global", ) self.value_global = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="value_global", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.global_dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.layer_id = layer_id attention_window = config.attention_window[self.layer_id] assert attention_window % 2 == 0, ( f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}" ) assert attention_window > 0, ( f"`attention_window` for layer {self.layer_id} has to be positive. 
Given {attention_window}" ) self.one_sided_attn_window_size = attention_window // 2 def build(self, input_shape=None): if not self.built: with tf.name_scope("query_global"): self.query_global.build((self.config.hidden_size,)) with tf.name_scope("key_global"): self.key_global.build((self.config.hidden_size,)) with tf.name_scope("value_global"): self.value_global.build((self.config.hidden_size,)) if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "query_global", None) is not None: with tf.name_scope(self.query_global.name): self.query_global.build([None, None, self.config.hidden_size]) if getattr(self, "key_global", None) is not None: with tf.name_scope(self.key_global.name): self.key_global.build([None, None, self.config.hidden_size]) if getattr(self, "value_global", None) is not None: with tf.name_scope(self.value_global.name): self.value_global.build([None, None, self.config.hidden_size]) def call( self, inputs, training=False, ): """ LongformerSelfAttention expects *len(hidden_states)* to be multiple of *attention_window*. Padding to *attention_window* happens in LongformerModel.forward to avoid redoing the padding on each layer. The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to: - -10000: no attention - 0: local attention - +10000: global attention """ # retrieve input args ( hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ) = inputs # project hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) batch_size, seq_len, embed_dim = shape_list(hidden_states) tf.debugging.assert_equal( embed_dim, self.embed_dim, message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", ) # normalize query query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype)) query_vectors = tf.reshape(query_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) key_vectors = tf.reshape(key_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) # attn_probs = (batch_size, seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask = attention_mask != 0 # cast to fp32/fp16 then replace 1's with -inf float_mask = tf.cast(remove_from_windowed_attention_mask, dtype=query_vectors.dtype) * LARGE_NEGATIVE # diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask = self._sliding_chunks_query_key_matmul( tf.ones(shape_list(attention_mask)), float_mask, self.one_sided_attn_window_size, ) # pad local attention probs attn_scores += diagonal_mask tf.debugging.assert_equal( shape_list(attn_scores), [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], message=( f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" ), ) # compute global attn indices required through out forward fn ( 
max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) = self._get_global_attn_indices(is_index_global_attn) # this function is only relevant for global attention if is_global_attn: attn_scores = self._concat_with_global_key_attn_probs( attn_scores=attn_scores, query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) attn_probs = stable_softmax(attn_scores, axis=-1) # softmax sometimes inserts NaN if all positions are masked, replace them with 0 # Make sure to create a mask with the proper shape: # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1] # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1] if is_global_attn: masked_index = tf.tile( is_index_masked[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1), ) else: masked_index = tf.tile( is_index_masked[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1), ) attn_probs = tf.where( masked_index, tf.zeros(shape_list(masked_index), dtype=attn_probs.dtype), attn_probs, ) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs # apply dropout attn_probs = self.dropout(attn_probs, training=training) value_vectors = tf.reshape(value_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) # if global attention, compute sum of global and local attn if is_global_attn: attn_output = self._compute_attn_output_with_global_indices( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: attn_output = self._sliding_chunks_matmul_attn_probs_value( attn_probs, value_vectors, self.one_sided_attn_window_size ) tf.debugging.assert_equal( shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size" ) attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim)) # compute value for global attention and overwrite to attention output if is_global_attn: attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( attn_output=attn_output, hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, layer_head_mask=layer_head_mask, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, training=training, ) else: # Leave attn_output unchanged global_attn_probs = tf.zeros((batch_size, self.num_heads, max_num_global_attn_indices, seq_len)) # make sure that local attention probabilities are set to 0 for indices of global attn # Make sure to create a mask with the proper shape: # if 
is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1] # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1] if is_global_attn: masked_global_attn_index = tf.tile( is_index_global_attn[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1), ) else: masked_global_attn_index = tf.tile( is_index_global_attn[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1), ) attn_probs = tf.where( masked_global_attn_index, tf.zeros(shape_list(masked_global_attn_index), dtype=attn_probs.dtype), attn_probs, ) outputs = (attn_output, attn_probs, global_attn_probs) return outputs def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): """ Matrix multiplication of query and key tensors using with a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap of size window_overlap """ batch_size, seq_len, num_heads, head_dim = shape_list(query) tf.debugging.assert_equal( seq_len % (window_overlap * 2), 0, message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", ) tf.debugging.assert_equal( shape_list(query), shape_list(key), message=( f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" f" {shape_list(key)}" ), ) chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 query = tf.reshape( tf.transpose(query, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim), ) key = tf.reshape(tf.transpose(key, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim)) chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multiplication # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype) chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query, chunked_key) # multiply # convert diagonals into columns paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]]) diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(chunked_attention_scores, paddings) # allocate space for the overall attention matrix where the chunks are combined. The last dimension # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to # window_overlap previous words). The following column is attention score from each word to itself, then # followed by window_overlap columns for the upper triangle. 
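        # Editor's illustration (added; assumes window_overlap = 2): each row i of the
        # combined matrix then has 2 * 2 + 1 = 5 columns, laid out as
        #     [score(i, i-2), score(i, i-1), score(i, i), score(i, i+1), score(i, i+2)]
        # i.e. columns 0..window_overlap-1 hold the lower triangle, column window_overlap
        # the token's attention to itself, and the last window_overlap columns the upper triangle.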
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions # - copying the main diagonal and the upper triangle # TODO: This code is most likely not very efficient and should be improved diagonal_attn_scores_up_triang = tf.concat( [ diagonal_chunked_attention_scores[:, :, :window_overlap, : window_overlap + 1], diagonal_chunked_attention_scores[:, -1:, window_overlap:, : window_overlap + 1], ], axis=1, ) # - copying the lower triangle diagonal_attn_scores_low_triang = tf.concat( [ tf.zeros( (batch_size * num_heads, 1, window_overlap, window_overlap), dtype=diagonal_chunked_attention_scores.dtype, ), diagonal_chunked_attention_scores[:, :, -(window_overlap + 1) : -1, window_overlap + 1 :], ], axis=1, ) diagonal_attn_scores_first_chunk = tf.concat( [ tf.roll( diagonal_chunked_attention_scores, shift=[1, window_overlap], axis=[2, 3], )[:, :, :window_overlap, :window_overlap], tf.zeros( (batch_size * num_heads, 1, window_overlap, window_overlap), dtype=diagonal_chunked_attention_scores.dtype, ), ], axis=1, ) first_chunk_mask = ( tf.tile( tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None], (batch_size * num_heads, 1, window_overlap, window_overlap), ) < 1 ) diagonal_attn_scores_low_triang = tf.where( first_chunk_mask, diagonal_attn_scores_first_chunk, diagonal_attn_scores_low_triang, ) # merging upper and lower triangle diagonal_attention_scores = tf.concat( [diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang], axis=-1 ) # separate batch_size and num_heads dimensions again diagonal_attention_scores = tf.transpose( tf.reshape( diagonal_attention_scores, (batch_size, num_heads, seq_len, 2 * window_overlap + 1), ), (0, 2, 1, 3), ) diagonal_attention_scores = self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores @staticmethod def _mask_invalid_locations(input_tensor, window_overlap): # create correct upper triangle bool mask mask_2d_upper = tf.reverse( tf.linalg.band_part(tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0), axis=[0], ) # pad to full matrix padding = tf.convert_to_tensor( [[0, shape_list(input_tensor)[1] - window_overlap], [0, shape_list(input_tensor)[3] - window_overlap - 1]] ) # create lower mask mask_2d = tf.pad(mask_2d_upper, padding) # combine with upper mask mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1]) # broadcast to full matrix mask_4d = tf.tile(mask_2d[None, :, None, :], (shape_list(input_tensor)[0], 1, 1, 1)) # inf tensor used for masking inf_tensor = -float("inf") * tf.ones_like(input_tensor) # mask input_tensor = tf.where(tf.math.greater(mask_4d, 0), inf_tensor, input_tensor) return input_tensor def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap): """ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the same shape as `attn_probs` """ batch_size, seq_len, num_heads, head_dim = shape_list(value) tf.debugging.assert_equal( seq_len % (window_overlap * 2), 0, message="Seq_len has to be multiple of 2 * window_overlap" ) tf.debugging.assert_equal( shape_list(attn_probs)[:3], shape_list(value)[:3], message="value and attn_probs must have same dims (except head_dim)", ) tf.debugging.assert_equal( shape_list(attn_probs)[3], 2 * window_overlap + 1, message="attn_probs last dim has to be 2 * window_overlap + 1", ) chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap chunked_attn_probs = tf.reshape( tf.transpose(attn_probs, (0, 2, 1, 3)), ( batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1, ), ) # group batch_size and num_heads dimensions into one value = tf.reshape( tf.transpose(value, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim), ) # pad seq_len with w at the beginning of the sequence and another window overlap at the end paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap], [0, 0]]) padded_value = tf.pad(value, paddings, constant_values=-1) # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap frame_size = 3 * window_overlap * head_dim frame_hop_size = (shape_list(padded_value)[1] * head_dim - frame_size) // chunks_count chunked_value = tf.signal.frame( tf.reshape(padded_value, (batch_size * num_heads, -1)), frame_size, frame_hop_size, ) chunked_value = tf.reshape( chunked_value, (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim), ) tf.debugging.assert_equal( shape_list(chunked_value), [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], message="Chunked value has the wrong shape", ) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value) context = tf.transpose( tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)), (0, 2, 1, 3), ) return context @staticmethod def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings): """pads rows and then flips rows and columns""" hidden_states_padded = tf.pad( hidden_states_padded, paddings ) # padding value is not important because it will be overwritten batch_size, chunk_size, seq_length, hidden_dim = shape_list(hidden_states_padded) hidden_states_padded = tf.reshape(hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length)) return hidden_states_padded @staticmethod def _pad_and_diagonalize(chunked_hidden_states): """ shift every row 1 step right, converting columns into diagonals. Example: ```python chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629, ] window_overlap = num_rows = 4 ``` (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] """ total_num_heads, num_chunks, window_overlap, hidden_dim = shape_list(chunked_hidden_states) paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0], [0, window_overlap + 1]]) chunked_hidden_states = tf.pad( chunked_hidden_states, paddings ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). 
Padding value is not important because it'll be overwritten
        chunked_hidden_states = tf.reshape(
            chunked_hidden_states, (total_num_heads, num_chunks, -1)
        )  # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap + 1)
        chunked_hidden_states = chunked_hidden_states[
            :, :, :-window_overlap
        ]  # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap)
        chunked_hidden_states = tf.reshape(
            chunked_hidden_states,
            (total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim),
        )  # total_num_heads x num_chunks x window_overlap x (hidden_dim + window_overlap)
        chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]

        return chunked_hidden_states

    @staticmethod
    def _chunk(hidden_states, window_overlap):
        """convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
        batch_size, seq_length, hidden_dim = shape_list(hidden_states)
        num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1

        # define frame size and frame stride (similar to convolution)
        frame_hop_size = window_overlap * hidden_dim
        frame_size = 2 * frame_hop_size
        hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim))

        # chunk with overlap
        chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size)

        tf.debugging.assert_equal(
            shape_list(chunked_hidden_states),
            [batch_size, num_output_chunks, frame_size],
            message=(
                "Make sure chunking is correctly applied. Chunked hidden states should have output dimension"
                f" {[batch_size, num_output_chunks, frame_size]}, but got {shape_list(chunked_hidden_states)}."
            ),
        )

        chunked_hidden_states = tf.reshape(
            chunked_hidden_states,
            (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),
        )

        return chunked_hidden_states

    @staticmethod
    def _get_global_attn_indices(is_index_global_attn):
        """compute global attn indices required throughout forward pass"""
        # helper variable
        num_global_attn_indices = tf.math.count_nonzero(is_index_global_attn, axis=1)
        num_global_attn_indices = tf.cast(num_global_attn_indices, dtype=tf.constant(1).dtype)

        # max number of global attn indices in batch
        max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices)

        # indices of global attn
        is_index_global_attn_nonzero = tf.where(is_index_global_attn)

        # helper variable
        is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims(
            num_global_attn_indices, axis=-1
        )

        # location of the non-padding values within global attention indices
        is_local_index_global_attn_nonzero = tf.where(is_local_index_global_attn)

        # location of the padding values within global attention indices
        is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn))

        return (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        )

    def _concat_with_global_key_attn_probs(
        self,
        attn_scores,
        key_vectors,
        query_vectors,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero,
        is_local_index_global_attn_nonzero,
        is_local_index_no_global_attn_nonzero,
    ):
        batch_size = shape_list(key_vectors)[0]

        # select global key vectors
        global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero)

        # create only global key vectors
        key_vectors_only_global = tf.scatter_nd(
            is_local_index_global_attn_nonzero,
            global_key_vectors,
            shape=(
                batch_size,
                max_num_global_attn_indices,
                self.num_heads,
                self.head_dim,
            ),
        )

        # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
        attn_probs_from_global_key =
tf.einsum("blhd,bshd->blhs", query_vectors, key_vectors_only_global) # (batch_size, max_num_global_attn_indices, seq_len, num_heads) attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key, (0, 3, 1, 2)) mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple( shape_list(attn_probs_from_global_key_trans)[-2:] ) mask = tf.ones(mask_shape) * -10000.0 mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype) # scatter mask attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update( attn_probs_from_global_key_trans, is_local_index_no_global_attn_nonzero, mask, ) # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans, (0, 2, 3, 1)) # concat to attn_probs # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1) return attn_scores def _compute_attn_output_with_global_indices( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, ): batch_size = shape_list(attn_probs)[0] # cut local attn probs to global only attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices] # select global value vectors global_value_vectors = tf.gather_nd(value_vectors, is_index_global_attn_nonzero) # create only global value vectors value_vectors_only_global = tf.scatter_nd( is_local_index_global_attn_nonzero, global_value_vectors, shape=( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim, ), ) # compute attn output only global attn_output_only_global = tf.einsum("blhs,bshd->blhd", attn_probs_only_global, value_vectors_only_global) # reshape attn probs attn_probs_without_global = attn_probs[:, :, :, max_num_global_attn_indices:] # compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output_from_hidden( self, attn_output, hidden_states, max_num_global_attn_indices, layer_head_mask, is_local_index_global_attn_nonzero, is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, is_index_masked, training, ): batch_size, seq_len = shape_list(hidden_states)[:2] # prepare global hidden states global_attn_hidden_states = tf.gather_nd(hidden_states, is_index_global_attn_nonzero) global_attn_hidden_states = tf.scatter_nd( is_local_index_global_attn_nonzero, global_attn_hidden_states, shape=(batch_size, max_num_global_attn_indices, self.embed_dim), ) # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= tf.math.sqrt( tf.cast(self.head_dim, dtype=global_query_vectors_only_global.dtype) ) global_query_vectors_only_global = self.reshape_and_transpose(global_query_vectors_only_global, batch_size) global_key_vectors = self.reshape_and_transpose(global_key_vectors, batch_size) global_value_vectors = self.reshape_and_transpose(global_value_vectors, batch_size) # compute attn scores global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True) tf.debugging.assert_equal( shape_list(global_attn_scores), [batch_size * self.num_heads, 
max_num_global_attn_indices, seq_len], message=( "global_attn_scores have the wrong size. Size should be" f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" f" {shape_list(global_attn_scores)}." ), ) global_attn_scores = tf.reshape( global_attn_scores, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len), ) global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3)) mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple( shape_list(global_attn_scores_trans)[-2:] ) global_attn_mask = tf.ones(mask_shape) * -10000.0 global_attn_mask = tf.cast(global_attn_mask, dtype=global_attn_scores_trans.dtype) # scatter mask global_attn_scores_trans = tf.tensor_scatter_nd_update( global_attn_scores_trans, is_local_index_no_global_attn_nonzero, global_attn_mask, ) global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3)) # mask global attn scores attn_mask = tf.tile(is_index_masked[:, None, None, :], (1, shape_list(global_attn_scores)[1], 1, 1)) global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores) global_attn_scores = tf.reshape( global_attn_scores, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len), ) # compute global attn probs global_attn_probs_float = stable_softmax(global_attn_scores, axis=-1) # apply layer head masking if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) ) global_attn_probs_float = tf.reshape( global_attn_probs_float, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len) ) # dropout global_attn_probs = self.global_dropout(global_attn_probs_float, training=training) # global attn output global_attn_output = tf.matmul(global_attn_probs, global_value_vectors) tf.debugging.assert_equal( shape_list(global_attn_output), [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], message=( "global_attn_output tensor has the wrong size. Size should be" f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" f" {shape_list(global_attn_output)}." 
), ) global_attn_output = tf.reshape( global_attn_output, (batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim), ) # get only non zero global attn output nonzero_global_attn_output = tf.gather_nd( tf.transpose(global_attn_output, (0, 2, 1, 3)), is_local_index_global_attn_nonzero, ) nonzero_global_attn_output = tf.reshape( nonzero_global_attn_output, (shape_list(is_local_index_global_attn_nonzero)[0], -1), ) # overwrite values with global attention attn_output = tf.tensor_scatter_nd_update( attn_output, is_index_global_attn_nonzero, nonzero_global_attn_output ) global_attn_probs = tf.reshape( global_attn_probs, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) ) return attn_output, global_attn_probs def reshape_and_transpose(self, vector, batch_size): return tf.reshape( tf.transpose( tf.reshape(vector, (batch_size, -1, self.num_heads, self.head_dim)), (0, 2, 1, 3), ), (batch_size * self.num_heads, -1, self.head_dim), ) class TFLEDEncoderAttention(keras.layers.Layer): def __init__(self, config, layer_id, **kwargs): super().__init__(**kwargs) self.longformer_self_attn = TFLEDEncoderSelfAttention(config, layer_id=layer_id, name="longformer_self_attn") self.output_dense = keras.layers.Dense(config.d_model, use_bias=True, name="output") self.config = config def call(self, inputs, training=False): ( hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ) = inputs self_outputs = self.longformer_self_attn( [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], training=training, ) attention_output = self.output_dense(self_outputs[0], training=training) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer_self_attn", None) is not None: with tf.name_scope(self.longformer_self_attn.name): self.longformer_self_attn.build(None) if getattr(self, "output_dense", None) is not None: with tf.name_scope(self.output_dense.name): self.output_dense.build([None, None, self.config.d_model]) class TFLEDDecoderAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: tuple[tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training=False, ) -> tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used 
as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + tf.cast( attention_mask, dtype=attn_weights.dtype ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( 
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFLEDEncoderLayer(keras.layers.Layer): def __init__(self, config: LEDConfig, layer_id: int, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFLEDEncoderAttention(config, layer_id, name="self_attn") self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, is_index_masked: tf.Tensor, is_index_global_attn: tf.Tensor, is_global_attn: bool, training=False, ): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. 
""" residual = hidden_states layer_outputs = self.self_attn( [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], training=training, ) hidden_states = layer_outputs[0] tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return (hidden_states,) + layer_outputs[1:] def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFLEDDecoderLayer(keras.layers.Layer): def __init__(self, config: LEDConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFLEDDecoderAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFLEDDecoderAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, encoder_layer_head_mask: tf.Tensor | None = None, past_key_value: tuple[tf.Tensor] | None = None, training=False, ) -> tuple[tf.Tensor, tf.Tensor, tf.Tensor, tuple[tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. 
encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`tf.Tensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. encoder_layer_head_mask (`tf.Tensor`): mask for encoder attention heads in a given layer of size *(config.encoder_attention_heads,)*. past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states # Self-Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "encoder_attn", None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, "encoder_attn_layer_norm", None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) 
if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFLEDPreTrainedModel(TFPreTrainedModel): config_class = LEDConfig base_model_prefix = "led" @property def input_signature(self): sig = super().input_signature sig["global_attention_mask"] = tf.TensorSpec((None, None), tf.int32, name="global_attention_mask") return sig @dataclass # Copied from transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput with TFLongformer->TFLEDEncoder class TFLEDEncoderBaseModelOutput(ModelOutput): """ Base class for Longformer's outputs, with potential hidden states, local and global attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: tf.Tensor | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None global_attentions: tuple[tf.Tensor, ...] 
| None = None @dataclass class TFLEDSeq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`list[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
            Those are the attention weights from every token with global attention to every token in the sequence.
    """

    last_hidden_state: tf.Tensor | None = None
    past_key_values: list[tf.Tensor] | None = None
    decoder_hidden_states: tuple[tf.Tensor, ...] | None = None
    decoder_attentions: tuple[tf.Tensor, ...] | None = None
    cross_attentions: tuple[tf.Tensor, ...] | None = None
    encoder_last_hidden_state: tf.Tensor | None = None
    encoder_hidden_states: tuple[tf.Tensor, ...] | None = None
    encoder_attentions: tuple[tf.Tensor, ...] | None = None
    encoder_global_attentions: tuple[tf.Tensor, ...] | None = None


@dataclass
class TFLEDSeq2SeqLMOutput(ModelOutput):
    """
    Base class for sequence-to-sequence language model outputs.

    Args:
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`list[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
            sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        encoder_global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where
            `x` is the number of tokens with global attention mask.

            Global attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads. Those are the attention weights from every token with global attention to every
            token in the sequence.
    """

    loss: tf.Tensor | None = None
    logits: tf.Tensor | None = None
    past_key_values: list[tf.Tensor] | None = None
    decoder_hidden_states: tuple[tf.Tensor, ...] | None = None
    decoder_attentions: tuple[tf.Tensor, ...] | None = None
    cross_attentions: tuple[tf.Tensor, ...] | None = None
    encoder_last_hidden_state: tf.Tensor | None = None
    encoder_hidden_states: tuple[tf.Tensor, ...] | None = None
    encoder_attentions: tuple[tf.Tensor, ...] | None = None
    encoder_global_attentions: tuple[tf.Tensor, ...] | None = None


LED_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`LEDConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
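
    Example (a minimal sketch; it uses the public "allenai/led-base-16384" checkpoint, the same one as in the
    mask-filling example of [`TFLEDForConditionalGeneration`]):

    ```python
    >>> from transformers import AutoTokenizer, TFLEDModel

    >>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
    >>> model = TFLEDModel.from_pretrained("allenai/led-base-16384")

    >>> inputs = tokenizer("My friends are cool.", return_tensors="tf")
    >>> # the TF LED stack does not derive `decoder_input_ids` from `input_ids`, so pass them explicitly
    >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=inputs.input_ids)
    >>> last_hidden_state = outputs.last_hidden_state
    ```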
""" LED_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tf.Tensor`, *optional*): hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape `(batch_size, sequence_length, hidden_size)` is a sequence of past_key_values (`tuple[tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. 
training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @keras_serializable class TFLEDEncoder(keras.layers.Layer): config_class = LEDConfig """ Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is a [`TFLEDEncoderLayer`]. Args: config: LEDConfig """ def __init__(self, config: LEDConfig, embed_tokens: keras.layers.Embedding | None = None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = keras.layers.Dropout(config.dropout) if config.encoder_layerdrop > 0: logger.warning("Layerdrop is currently disabled in TFLED models.") self.layerdrop = 0.0 self.padding_idx = config.pad_token_id if isinstance(config.attention_window, int): assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value" assert config.attention_window > 0, "`config.attention_window` has to be positive" config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer else: assert len(config.attention_window) == config.num_hidden_layers, ( "`len(config.attention_window)` should equal `config.num_hidden_layers`. " f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" ) self.attention_window = config.attention_window self.embed_tokens = embed_tokens self.embed_positions = TFLEDLearnedPositionalEmbedding( config.max_encoder_position_embeddings, config.d_model, name="embed_positions", ) self.layers = [TFLEDEncoderLayer(config, i, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.embed_dim = config.d_model def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids=None, inputs_embeds=None, attention_mask=None, global_attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): """ Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) # merge `global_attention_mask` and `attention_mask` if global_attention_mask is not None: attention_mask = attention_mask * tf.cast((global_attention_mask + 1), dtype=attention_mask.dtype) padding_len, input_ids, attention_mask, inputs_embeds = self._pad_to_window_size( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, pad_token_id=self.padding_idx, ) input_shape = shape_list(attention_mask) # is index masked or global attention is_index_masked = tf.math.less(tf.cast(attention_mask, tf.int8), 1) is_index_global_attn = tf.math.greater(tf.cast(attention_mask, tf.int8), 1) is_global_attn = tf.math.reduce_any(is_index_global_attn) embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=training) # check attention mask and invert if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask)[:, 0, 0, :] attention_mask = attention_mask[:, :, None, None] encoder_states = () if output_hidden_states else None all_attentions = all_global_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), message=( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." 
                ),
            )

        # encoder layers
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                hidden_states_to_add = self.compute_hidden_states(hidden_states, padding_len)
                encoder_states = encoder_states + (hidden_states_to_add,)
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):  # skip the layer
                continue

            layer_outputs = encoder_layer(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                is_index_masked=is_index_masked,
                is_index_global_attn=is_index_global_attn,
                is_global_attn=is_global_attn,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                # bsz x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1)
                #     => bsz x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
                all_attentions = all_attentions + (tf.transpose(layer_outputs[1], (0, 2, 1, 3)),)
                # bsz x num_attn_heads x num_global_attn x seq_len => bsz x num_attn_heads x seq_len x num_global_attn
                all_global_attentions = all_global_attentions + (tf.transpose(layer_outputs[2], (0, 1, 3, 2)),)

        # undo padding: unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
        hidden_states = self.compute_hidden_states(hidden_states, padding_len)

        # undo padding in the attention weights as well
        if output_attentions:
            all_attentions = (
                tuple(state[:, :, :-padding_len, :] for state in all_attentions)
                if padding_len > 0
                else all_attentions
            )

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFLEDEncoderBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
            attentions=all_attentions,
            global_attentions=all_global_attentions,
        )

    @tf.function
    def compute_hidden_states(self, hidden_states, padding_len):
        return hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states

    def _pad_to_window_size(
        self,
        input_ids,
        attention_mask,
        inputs_embeds,
        pad_token_id,
    ):
        """A helper function to pad tokens and mask to work with the implementation of Longformer self-attention."""
        # padding
        attention_window = (
            self.attention_window if isinstance(self.attention_window, int) else max(self.attention_window)
        )

        assert attention_window % 2 == 0, f"`attention_window` should be an even value. 
Given {attention_window}" input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds) batch_size, seq_len = input_shape[:2] padding_len = (attention_window - seq_len % attention_window) % attention_window if padding_len > 0: logger.warning_once( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.attention_window`: {attention_window}" ) paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]]) if input_ids is not None: input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id) if inputs_embeds is not None: if padding_len > 0: input_ids_padding = tf.fill((batch_size, padding_len), pad_token_id) inputs_embeds_padding = self.embed_tokens(input_ids_padding) inputs_embeds = tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2) attention_mask = tf.pad(attention_mask, paddings, constant_values=False) # no attention on the padding tokens return ( padding_len, input_ids, attention_mask, inputs_embeds, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layernorm_embedding", None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.embed_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLEDDecoder(keras.layers.Layer): config_class = LEDConfig """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFLEDDecoderLayer`] Args: config: LEDConfig embed_tokens: output embedding """ def __init__(self, config: LEDConfig, embed_tokens: keras.layers.Embedding | None = None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens if config.decoder_layerdrop > 0: logger.warning("Layerdrop is currently disabled in TFLED models.") self.layerdrop = 0.0 self.embed_positions = TFLEDLearnedPositionalEmbedding( config.max_decoder_position_embeddings, config.d_model, name="embed_positions", ) self.layers = [TFLEDDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.dropout = keras.layers.Dropout(config.dropout) def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids=None, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
            [What are attention masks?](../glossary#attention-mask)
        encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
            the decoder.
        encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
            Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
            selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder, used to avoid performing
            cross-attention on masked heads. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        past_key_values (`tuple[tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
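        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).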
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 # embed positions positions = self.embed_positions(input_shape, past_key_values_length) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask( tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] ) if attention_mask is not None and input_shape[-1] > 1: combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) hidden_states = self.layernorm_embedding(hidden_states + positions) hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () all_self_attns = () all_cross_attentions = () present_key_values = () # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), message=( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, encoder_layer_head_mask=encoder_head_mask[idx] if encoder_head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) all_cross_attentions += (layer_cross_attn,) if output_hidden_states: all_hidden_states += (hidden_states,) else: all_hidden_states = None all_self_attns = all_self_attns if output_attentions else None all_cross_attentions = all_cross_attentions if output_attentions else None present_key_values = present_key_values if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) else: return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layernorm_embedding", None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.config.d_model]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLEDMainLayer(keras.layers.Layer): config_class = LEDConfig def __init__(self, config: LEDConfig, **kwargs): super().__init__(**kwargs) self.config = config self.shared = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="led.shared", ) # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) self.shared.load_weight_prefix = "led.shared" self.encoder = TFLEDEncoder(config, self.shared, name="encoder") self.decoder = TFLEDDecoder(config, self.shared, name="decoder") def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs: tuple | TFLEDEncoderBaseModelOutput | None = None, global_attention_mask=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): if decoder_input_ids is None and decoder_inputs_embeds is None: use_cache = False if 
encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFLEDEncoderBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFLEDEncoderBaseModelOutput): encoder_outputs = TFLEDEncoderBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # If the user passed a TFLEDEncoderBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False elif not return_dict and not isinstance(encoder_outputs, tuple): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder( decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFLEDSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_global_attentions=encoder_outputs.global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True # The shared/tied weights expect to be in the model base namespace # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than # the current one. 
with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"): self.shared.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare LED Model outputting raw hidden-states without any specific head on top.", LED_START_DOCSTRING, ) class TFLEDModel(TFLEDPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.led = TFLEDMainLayer(config, name="led") def get_encoder(self): return self.led.encoder def get_decoder(self): return self.led.decoder @unpack_inputs @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLEDSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, decoder_head_mask: tf.Tensor | None = None, encoder_outputs: tf.Tensor | None = None, global_attention_mask: tf.Tensor | None = None, past_key_values: tuple[tuple[tf.Tensor]] | None = None, inputs_embeds: tf.Tensor | None = None, decoder_inputs_embeds: tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> tuple[tf.Tensor] | TFLEDSeq2SeqModelOutput: outputs = self.led( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, global_attention_mask=global_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None enc_g_attns = tf.convert_to_tensor(output.encoder_global_attentions) if self.config.output_attentions else None return TFLEDSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, encoder_global_attentions=enc_g_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "led", None) is not None: with tf.name_scope(self.led.name): self.led.build(None) # Copied from 
transformers.models.bart.modeling_tf_bart.BiasLayer
class BiasLayer(keras.layers.Layer):
    """
    Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
    so all weights have to be registered in a layer.
    """

    def __init__(self, shape, initializer, trainable, name, **kwargs):
        super().__init__(name=name, **kwargs)
        # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
        # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
        # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
        self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)

    def call(self, x):
        return x + self.bias


@add_start_docstrings(
    "The LED Model with a language modeling head. Can be used for summarization.",
    LED_START_DOCSTRING,
)
class TFLEDForConditionalGeneration(TFLEDPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [
        r"led.encoder.embed_tokens.weight",
        r"led.decoder.embed_tokens.weight",
    ]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.led = TFLEDMainLayer(config, name="led")
        self.use_cache = config.use_cache
        # final_logits_bias is registered as a buffer in pytorch, so it is not trainable here for the sake of
        # consistency.
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
        )

        # TODO (Joao): investigate why LED has numerical issues in XLA generate
        self.supports_xla_generation = False

    def get_decoder(self):
        return self.led.decoder

    def get_encoder(self):
        return self.led.encoder

    def get_bias(self):
        return {"final_logits_bias": self.bias_layer.bias}

    def set_bias(self, value):
        # Replaces the existing layer containing the bias for correct (de)serialization.
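        # (Keras saves weights on a per-layer basis, see `BiasLayer`'s docstring above, so the bias layer is rebuilt
        # with the incoming value's vocabulary size rather than assigned into the old, possibly differently-shaped,
        # variable.)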
vocab_size = value["final_logits_bias"].shape[-1] self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False ) self.bias_layer.bias.assign(value["final_logits_bias"]) def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) @unpack_inputs @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLEDSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: TFLEDEncoderBaseModelOutput | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: tf.Tensor | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFLEDSeq2SeqLMOutput: """ Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFLEDForConditionalGeneration >>> import tensorflow as tf >>> mname = "allenai/led-base-16384" >>> tokenizer = AutoTokenizer.from_pretrained(mname) >>> TXT = "My friends are <mask> but they eat too many carbs." >>> model = TFLEDForConditionalGeneration.from_pretrained(mname) >>> batch = tokenizer([TXT], return_tensors="tf") >>> logits = model(inputs=batch.input_ids).logits >>> probs = tf.nn.softmax(logits[0]) >>> # probs[5] is associated with the mask token ```""" if labels is not None: use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.led( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, global_attention_mask=global_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) lm_logits = tf.matmul(outputs[0], self.led.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFLEDSeq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, # index 1 of d outputs decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs cross_attentions=outputs.cross_attentions, # index 4 of d outputs encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out 
encoder_attentions=outputs.encoder_attentions, # 2 of e out encoder_global_attentions=outputs.encoder_global_attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None enc_g_attns = tf.convert_to_tensor(output.encoder_global_attentions) if self.config.output_attentions else None return TFLEDSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, encoder_global_attentions=enc_g_attns, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def hf_compute_loss(self, labels, logits): """CrossEntropyLoss that ignores pad tokens""" loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) return loss_fn(labels, reduced_logits) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_loss = loss_fn(tf.nn.relu(labels), logits) # make sure only non-padding labels affect the loss loss_mask = tf.cast(labels != self.config.pad_token_id, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "led", None) is not None: with tf.name_scope(self.led.name): self.led.build(None) if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) __all__ = ["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]
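
# A minimal usage sketch for summarization with the classes above. Assumptions: the
# "allenai/led-base-16384" checkpoint (the same one used in the mask-filling example in
# `TFLEDForConditionalGeneration.call`), and illustrative input text and generation arguments.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
    model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

    inputs = tokenizer("Replace me with a long document to summarize.", return_tensors="tf")
    # LED expects global attention on at least one token; the first token is the usual choice for
    # summarization (the encoder merges `global_attention_mask` into `attention_mask`, see
    # `TFLEDEncoder.call`).
    global_attention_mask = tf.concat(
        [tf.ones_like(inputs.input_ids[:, :1]), tf.zeros_like(inputs.input_ids[:, 1:])], axis=-1
    )
    summary_ids = model.generate(
        inputs.input_ids, global_attention_mask=global_attention_mask, max_length=64
    )
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))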
transformers/src/transformers/models/led/modeling_tf_led.py/0
{ "file_path": "transformers/src/transformers/models/led/modeling_tf_led.py", "repo_id": "transformers", "token_count": 55083 }
497
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import os import re import torch from datasets import load_dataset from transformers import ( AutoModelForKeypointDetection, LightGlueForKeypointMatching, LightGlueImageProcessor, ) from transformers.models.lightglue.configuration_lightglue import LightGlueConfig DEFAULT_CHECKPOINT_URL = "https://github.com/cvg/LightGlue/releases/download/v0.1_arxiv/superpoint_lightglue.pth" def prepare_imgs(): dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train") image0 = dataset[0]["image"] image1 = dataset[1]["image"] image2 = dataset[2]["image"] # [image1, image1] on purpose to test the model early stopping return [[image2, image0], [image1, image1]] def verify_model_outputs(model, device): images = prepare_imgs() preprocessor = LightGlueImageProcessor() inputs = preprocessor(images=images, return_tensors="pt").to(device) model.to(device) with torch.no_grad(): outputs = model(**inputs, output_hidden_states=True, output_attentions=True) predicted_matches_values = outputs.matches[0, 0, 20:30] predicted_matching_scores_values = outputs.matching_scores[0, 0, 20:30] predicted_number_of_matches = torch.sum(outputs.matches[0][0] != -1).item() expected_max_number_keypoints = 866 expected_matches_shape = torch.Size((len(images), 2, expected_max_number_keypoints)) expected_matching_scores_shape = torch.Size((len(images), 2, expected_max_number_keypoints)) expected_matches_values = torch.tensor([-1, -1, 5, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64).to(device) expected_matching_scores_values = torch.tensor([0, 0, 0.2997, 0, 0, 0.6762, 0, 0.8826, 0, 0.5583]).to(device) expected_number_of_matches = 140 assert outputs.matches.shape == expected_matches_shape assert outputs.matching_scores.shape == expected_matching_scores_shape assert torch.allclose(predicted_matches_values, expected_matches_values, atol=1e-2) assert torch.allclose(predicted_matching_scores_values, expected_matching_scores_values, atol=1e-2) assert predicted_number_of_matches == expected_number_of_matches ORIGINAL_TO_CONVERTED_KEY_MAPPING = { r"posenc.Wr": r"positional_encoder.projector", r"self_attn.(\d+).Wqkv": r"transformer_layers.\1.self_attention.Wqkv", r"self_attn.(\d+).out_proj": r"transformer_layers.\1.self_attention.o_proj", r"self_attn.(\d+).ffn.0": r"transformer_layers.\1.self_mlp.fc1", r"self_attn.(\d+).ffn.1": r"transformer_layers.\1.self_mlp.layer_norm", r"self_attn.(\d+).ffn.3": r"transformer_layers.\1.self_mlp.fc2", r"cross_attn.(\d+).to_qk": r"transformer_layers.\1.cross_attention.to_qk", r"cross_attn.(\d+).to_v": r"transformer_layers.\1.cross_attention.v_proj", r"cross_attn.(\d+).to_out": r"transformer_layers.\1.cross_attention.o_proj", r"cross_attn.(\d+).ffn.0": r"transformer_layers.\1.cross_mlp.fc1", r"cross_attn.(\d+).ffn.1": r"transformer_layers.\1.cross_mlp.layer_norm", r"cross_attn.(\d+).ffn.3": r"transformer_layers.\1.cross_mlp.fc2", r"log_assignment.(\d+).matchability": 
r"match_assignment_layers.\1.matchability", r"log_assignment.(\d+).final_proj": r"match_assignment_layers.\1.final_projection", r"token_confidence.(\d+).token.0": r"token_confidence.\1.token", } def convert_old_keys_to_new_keys(state_dict_keys: list[str]): """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. """ output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # an empty line continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def add_keypoint_detector_state_dict(lightglue_state_dict): keypoint_detector = AutoModelForKeypointDetection.from_pretrained("magic-leap-community/superpoint") keypoint_detector_state_dict = keypoint_detector.state_dict() for k, v in keypoint_detector_state_dict.items(): lightglue_state_dict[f"keypoint_detector.{k}"] = v return lightglue_state_dict def split_weights(state_dict): for i in range(9): # Remove unused r values log_assignment_r_key = f"log_assignment.{i}.r" if state_dict.get(log_assignment_r_key, None) is not None: state_dict.pop(log_assignment_r_key) Wqkv_weight = state_dict.pop(f"transformer_layers.{i}.self_attention.Wqkv.weight") Wqkv_bias = state_dict.pop(f"transformer_layers.{i}.self_attention.Wqkv.bias") Wqkv_weight = Wqkv_weight.reshape(256, 3, 256) Wqkv_bias = Wqkv_bias.reshape(256, 3) query_weight, key_weight, value_weight = Wqkv_weight[:, 0], Wqkv_weight[:, 1], Wqkv_weight[:, 2] query_bias, key_bias, value_bias = Wqkv_bias[:, 0], Wqkv_bias[:, 1], Wqkv_bias[:, 2] state_dict[f"transformer_layers.{i}.self_attention.q_proj.weight"] = query_weight state_dict[f"transformer_layers.{i}.self_attention.k_proj.weight"] = key_weight state_dict[f"transformer_layers.{i}.self_attention.v_proj.weight"] = value_weight state_dict[f"transformer_layers.{i}.self_attention.q_proj.bias"] = query_bias state_dict[f"transformer_layers.{i}.self_attention.k_proj.bias"] = key_bias state_dict[f"transformer_layers.{i}.self_attention.v_proj.bias"] = value_bias to_qk_weight = state_dict.pop(f"transformer_layers.{i}.cross_attention.to_qk.weight") to_qk_bias = state_dict.pop(f"transformer_layers.{i}.cross_attention.to_qk.bias") state_dict[f"transformer_layers.{i}.cross_attention.q_proj.weight"] = to_qk_weight state_dict[f"transformer_layers.{i}.cross_attention.q_proj.bias"] = to_qk_bias state_dict[f"transformer_layers.{i}.cross_attention.k_proj.weight"] = to_qk_weight state_dict[f"transformer_layers.{i}.cross_attention.k_proj.bias"] = to_qk_bias return state_dict @torch.no_grad() def write_model( model_path, checkpoint_url, organization, safe_serialization=True, push_to_hub=False, ): os.makedirs(model_path, exist_ok=True) # ------------------------------------------------------------ # LightGlue config # ------------------------------------------------------------ config = LightGlueConfig( descriptor_dim=256, num_hidden_layers=9, num_attention_heads=4, ) config.architectures = ["LightGlueForKeypointMatching"] config.save_pretrained(model_path) print("Model config saved successfully...") # ------------------------------------------------------------ # Convert weights # ------------------------------------------------------------ print(f"Fetching all parameters from the checkpoint at {checkpoint_url}...") original_state_dict = 
torch.hub.load_state_dict_from_url(checkpoint_url) print("Converting model...") all_keys = list(original_state_dict.keys()) new_keys = convert_old_keys_to_new_keys(all_keys) state_dict = {} for key in all_keys: new_key = new_keys[key] state_dict[new_key] = original_state_dict.pop(key).contiguous().clone() del original_state_dict gc.collect() state_dict = split_weights(state_dict) state_dict = add_keypoint_detector_state_dict(state_dict) print("Loading the checkpoint in a LightGlue model...") device = "cuda" with torch.device(device): model = LightGlueForKeypointMatching(config) model.load_state_dict(state_dict) print("Checkpoint loaded successfully...") del model.config._name_or_path print("Saving the model...") model.save_pretrained(model_path, safe_serialization=safe_serialization) del state_dict, model # Safety check: reload the converted model gc.collect() print("Reloading the model to check if it's saved correctly.") model = LightGlueForKeypointMatching.from_pretrained(model_path) print("Model reloaded successfully.") model_name = "lightglue" if "superpoint" in checkpoint_url: model_name += "_superpoint" if checkpoint_url == DEFAULT_CHECKPOINT_URL: print("Checking the model outputs...") verify_model_outputs(model, device) print("Model outputs verified successfully.") if push_to_hub: print("Pushing model to the hub...") model.push_to_hub( repo_id=f"{organization}/{model_name}", commit_message="Add model", ) config.push_to_hub(repo_id=f"{organization}/{model_name}", commit_message="Add config") write_image_processor(model_path, model_name, organization, push_to_hub=push_to_hub) def write_image_processor(save_dir, model_name, organization, push_to_hub=False): if "superpoint" in model_name: image_processor = LightGlueImageProcessor(do_grayscale=True) else: image_processor = LightGlueImageProcessor() image_processor.save_pretrained(save_dir) if push_to_hub: print("Pushing image processor to the hub...") image_processor.push_to_hub( repo_id=f"{organization}/{model_name}", commit_message="Add image processor", ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default=DEFAULT_CHECKPOINT_URL, type=str, help="URL of the original LightGlue checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument( "--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub", ) parser.add_argument( "--organization", default="ETH-CVG", type=str, help="Hub organization in which you want the model to be uploaded.", ) args = parser.parse_args() write_model( args.pytorch_dump_folder_path, args.checkpoint_url, args.organization, safe_serialization=True, push_to_hub=args.push_to_hub, )
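
# Example invocation (the dump folder name is a placeholder; the URL is the default defined above):
#
#     python convert_lightglue_to_hf.py \
#         --checkpoint_url https://github.com/cvg/LightGlue/releases/download/v0.1_arxiv/superpoint_lightglue.pth \
#         --pytorch_dump_folder_path ./lightglue_superpoint \
#         --push_to_hub
#
# Note that `write_model` hard-codes `device = "cuda"`, so a GPU is required both for loading
# the converted checkpoint and for the output verification step.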
transformers/src/transformers/models/lightglue/convert_lightglue_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/lightglue/convert_lightglue_to_hf.py", "repo_id": "transformers", "token_count": 4567 }
498
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Optional, Union import torch from torch import nn from transformers.models.llava_next.image_processing_llava_next_fast import LlavaNextImageProcessorFast from transformers.models.llava_next_video.modeling_llava_next_video import ( LlavaNextVideoCausalLMOutputWithPast, LlavaNextVideoForConditionalGeneration, LlavaNextVideoModel, LlavaNextVideoModelOutputWithPast, LlavaNextVideoPreTrainedModel, TransformersKwargs, get_anyres_image_grid_shape, image_size_to_num_patches, unpad_image, ) from ...cache_utils import Cache from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size, ) from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, can_return_tuple, is_torchvision_available, is_torchvision_v2_available, logging, ) if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F logger = logging.get_logger(__name__) class LlavaOnevisionFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): """ image_grid_pinpoints (`list[list[int]]`, *optional*): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess` method. do_pad (`bool`, *optional*): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. 
""" image_grid_pinpoints: Optional[list[list[int]]] do_pad: Optional[bool] class LlavaOnevisionImageProcessorFast(LlavaNextImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} crop_size = None default_to_square = False do_resize = True do_center_crop = None do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True image_grid_pinpoints = [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]] # fmt: skip model_input_names = ["pixel_values", "image_sizes", "batch_num_images"] # Copied from transformers.models.llava.image_processing_llava_fast.LlavaImageProcessorFast.pad_to_square def pad_to_square( self, images: "torch.Tensor", background_color: Union[int, tuple[int, int, int]] = 0, ) -> "torch.Tensor": """ Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in mutli-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. """ height, width = get_image_size(images, ChannelDimension.FIRST) if height == width: return images num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError( f"background_color must have no more than {num_channels} elements to match the number of channels" ) max_dim = max(height, width) paste_x_left = (max_dim - width) // 2 paste_y_left = (max_dim - height) // 2 paste_x_right = max_dim - width - paste_x_left paste_y_right = max_dim - height - paste_y_left padded_images = F.pad( images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color ) return padded_images @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]) -> BatchFeature: if isinstance(images, (tuple, list)) and isinstance(images[0], (tuple, list)): # if the first element is a list, we assume that all elements are lists batch_num_images = [len(x) for x in images] elif isinstance(images, (tuple, list)): # treat this as a single-image case for backward compatibility batch_num_images = [1] * len(images) else: batch_num_images = [1] kwargs["batch_num_images"] = batch_num_images return super().preprocess(images, **kwargs) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: 
Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: processed_images = [] image_sizes = [] # only single image patching is supported need_patching = [n == 1 for n in batch_num_images for _ in range(n)] # Determine the size tuple if size and size.height and size.width: size_tuple = (size.height, size.width) else: size_tuple = (size.shortest_edge, size.shortest_edge) # Determine the patch size if crop_size and crop_size.height: patch_size = crop_size.height elif size and size.height: patch_size = size.height else: patch_size = size.shortest_edge for i, image in enumerate(images): if need_patching[i]: image_patches = self._get_image_patches( image, image_grid_pinpoints, size=size_tuple, patch_size=patch_size, interpolation=interpolation, ) else: padded_image = self.pad_to_square( images=image, background_color=tuple(int(x * 255) for x in self.image_mean) ) image_patches = [padded_image] # Group images by size for batched processing processed_image_patches_grouped = {} grouped_image_patches, grouped_image_patches_index = group_images_by_shape( image_patches, disable_grouping=disable_grouping ) for shape, stacked_image_patches in grouped_image_patches.items(): if do_resize: stacked_image_patches = self.resize( image=stacked_image_patches, size=size, interpolation=interpolation, ) if do_center_crop: stacked_image_patches = self.center_crop(stacked_image_patches, crop_size) # Fused rescale and normalize stacked_image_patches = self.rescale_and_normalize( stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_image_patches_grouped[shape] = stacked_image_patches processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index) processed_image_patches = ( torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches ) processed_images.append(processed_image_patches) image_sizes.append(get_image_size(image, ChannelDimension.FIRST)) if do_pad: processed_images = self._pad_for_batching(processed_images) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature( data={"pixel_values": processed_images, "image_sizes": image_sizes, "batch_num_images": batch_num_images}, tensor_type=return_tensors, ) class LlavaOnevisionModelOutputWithPast(LlavaNextVideoModelOutputWithPast): pass class LlavaOnevisionCausalLMOutputWithPast(LlavaNextVideoCausalLMOutputWithPast): pass class LlavaOnevisionPreTrainedModel(LlavaNextVideoPreTrainedModel): pass class LlavaOnevisionModel(LlavaNextVideoModel): def __init__(self, config): super().__init__(config) del self.vision_resampler def pack_image_features(self, image_features, image_sizes, image_newline=None, vision_aspect_ratio="anyres_max_9"): """ Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors. Args: image_features (`list[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`) List of image feature tensor, each contains all the visual feature of all patches. image_sizes (`torch.Tensor` of shape `(num_images, 2)`) Actual image size of each images (H, W). image_newline (`torch.Tensor` of shape `(embed_dim)`) New line embedding vector. vision_aspect_ratio (`str`, *optional*, "anyres_max_9"): Aspect ratio used when processong image features. The default value is "anyres_max_9". 
Returns: image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`) feature_lens (`list[int]`) token length of each image in image_features """ new_image_features = [] feature_lens = [] for image_idx, image_feature in enumerate(image_features): if image_feature.shape[0] > 1: base_image_feature = image_feature[0] image_feature = image_feature[1:] height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size if height * width != base_image_feature.shape[0]: raise ValueError("The number of patches is not consistent with the image size.") num_patch_height, num_patch_width = get_anyres_image_grid_shape( image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size, ) image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1) image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() image_feature = image_feature.flatten(1, 2).flatten(2, 3) image_feature = unpad_image(image_feature, image_sizes[image_idx]) max_num_patches = int(vision_aspect_ratio.strip("anyres_max_")) channels, curr_height, curr_width = image_feature.shape ratio = math.sqrt(curr_height * curr_width / (max_num_patches * height**2)) if ratio > 1.1: image_feature = image_feature[None] image_feature = nn.functional.interpolate( image_feature, [int(curr_height // ratio), int(curr_width // ratio)], mode="bilinear" )[0] if image_newline is not None: image_feature = torch.cat( ( image_feature, image_newline[:, None, None] .expand(*image_feature.shape[:-1], 1) .to(image_feature.device, image_feature.dtype), ), dim=-1, ) image_feature = image_feature.flatten(1, 2).transpose(0, 1) image_feature = torch.cat((base_image_feature, image_feature), dim=0) else: image_feature = image_feature[0] if image_newline is not None: image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0) new_image_features.append(image_feature) feature_lens.append(image_feature.size(0)) feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device) return new_image_features, feature_lens def apply_pooling(self, image_features): height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size batch_frames, seq_len, dim = image_features.shape image_features = image_features.view(batch_frames, height, width, -1) image_features = image_features.permute(0, 3, 1, 2).contiguous() height, width = image_features.shape[2:] scaled_shape = [math.ceil(height / 2), math.ceil(width / 2)] image_features = nn.functional.interpolate(image_features, size=scaled_shape, mode="bilinear") image_features = image_features.permute(0, 2, 3, 1) image_features = image_features.view(batch_frames, -1, dim) return image_features def get_image_features( self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, vision_aspect_ratio: Optional[str] = None, batch_num_images: Optional[torch.LongTensor] = None, ): """ Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_patches, channels, height, width)`) The tensors corresponding to the input images. image_sizes (`torch.Tensor` of shape `(num_images, 2)`) Actual image size of each images (H, W). vision_feature_layer (`Union[int, list[int]]`): The index of the layer to select the vision feature. 
If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"` batch_num_images (`torch.LongTensor`, *optional*): Number of images in each sample. Returns: image_features (list[`torch.Tensor`]): List of image feature tensor, each contains all the visual feature of all patches and are of shape `(num_patches, image_length, embed_dim)`). """ vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) vision_feature_select_strategy = ( vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy ) vision_aspect_ratio = ( vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio ) # ! infer image_num_patches from image_sizes if batch_num_images is None: # treat this as a single-image case for backward compatibility need_patching = [True] * len(image_sizes) else: need_patching = [n == 1 for n in batch_num_images for _ in range(n)] image_num_patches = [ image_size_to_num_patches( image_size=imsize, grid_pinpoints=self.config.image_grid_pinpoints, patch_size=self.config.vision_config.image_size, ) if should_patch else 1 for imsize, should_patch in zip(image_sizes, need_patching) ] if pixel_values.dim() == 5: # stacked if input is (batch_size, num_patches, num_channels, height, width) _pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)] pixel_values = torch.cat(_pixel_values_list, dim=0) elif pixel_values.dim() != 4: # otherwise has to be stacked from list of (num_patches, num_channels, height, width) raise ValueError(f"pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions") image_features = self.vision_tower(pixel_values, output_hidden_states=True) # If we have one vision feature layer, return the corresponding hidden states, # otherwise, select the hidden states of each feature layer and concatenate them if isinstance(vision_feature_layer, int): selected_image_feature = image_features.hidden_states[vision_feature_layer] else: hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_image_feature = torch.cat(hs_pool, dim=-1) if vision_feature_select_strategy == "default": selected_image_feature = selected_image_feature[:, 1:] elif vision_feature_select_strategy == "full": selected_image_feature = selected_image_feature image_features = self.multi_modal_projector(selected_image_feature) image_features = torch.split(image_features, image_num_patches, dim=0) image_features, feature_lens = self.pack_image_features( image_features, image_sizes, image_newline=self.image_newline, vision_aspect_ratio=vision_aspect_ratio, ) return image_features def get_video_features( self, pixel_values: torch.FloatTensor, vision_feature_layer: Union[int, list[int]], vision_feature_select_strategy: str, ): """ Obtains video last hidden states from the vision tower, apply multimodal projection and pooling. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`) The tensors corresponding to the input video. vision_feature_layer (`Union[int, list[int]], *optional*, defaults to -2`): The index of the layer to select the vision feature. 
If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"` Returns: video_features (list[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches and are of shape `(num_videos, video_length, embed_dim)`). """ batch_size, frames, channels, height, width = pixel_values.shape pixel_values = pixel_values.view(batch_size * frames, channels, height, width) video_features = self.vision_tower(pixel_values, output_hidden_states=True) # If we have one vision feature layer, return the corresponding hidden states, # otherwise, select the hidden states of each feature layer and concatenate them if isinstance(vision_feature_layer, int): selected_video_feature = video_features.hidden_states[vision_feature_layer] else: hs_pool = [video_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_video_feature = torch.cat(hs_pool, dim=-1) if vision_feature_select_strategy == "default": selected_video_feature = selected_video_feature[:, 1:] elif vision_feature_select_strategy == "full": selected_video_feature = selected_video_feature video_features = self.multi_modal_projector(selected_video_feature) video_features = self.apply_pooling(video_features) video_features = video_features.reshape(batch_size, frames * video_features.shape[1], -1) return video_features def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, image_sizes: Optional[torch.LongTensor] = None, pixel_values_videos: torch.FloatTensor = None, image_sizes_videos: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, vision_aspect_ratio: Optional[str] = None, batch_num_images: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, LlavaOnevisionModelOutputWithPast]: r""" image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*): The sizes of the videos in the batch, being (height, width) for each frame in the video. vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processong image features. The default value is "anyres_max_9". batch_num_images (`torch.LongTensor`, *optional*): Number of images in each sample. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) vision_feature_select_strategy = ( vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy ) vision_aspect_ratio = ( vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio ) if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) # Images are processed with Anyres if pixel_values is not None: image_features = self.get_image_features( pixel_values, image_sizes, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, batch_num_images=batch_num_images, ) image_features = torch.cat(image_features, dim=0) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) # Video are simply embedded and further pooled to decrease seq len if pixel_values_videos is not None: video_features = self.get_video_features( pixel_values_videos, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, ) image_newline = ( self.image_newline[None, None, :].repeat(video_features.shape[0], 1, 1).to(video_features.device) ) video_features = torch.cat((video_features, image_newline), dim=1) video_features = video_features.flatten(0, 1).to(inputs_embeds.device, inputs_embeds.dtype) _, special_video_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, video_features=video_features ) inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) return LlavaOnevisionModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, video_hidden_states=video_features if pixel_values_videos is not None else None, ) class LlavaOnevisionForConditionalGeneration(LlavaNextVideoForConditionalGeneration): @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, image_sizes: Optional[torch.LongTensor] = None, pixel_values_videos: torch.FloatTensor = None, image_sizes_videos: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: 
Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, vision_aspect_ratio: Optional[str] = None, batch_num_images: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, LlavaOnevisionCausalLMOutputWithPast]: r""" image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*): The sizes of the videos in the batch, being (height, width) for each frame in the video. vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): Aspect ratio used when processong image features. The default value is "anyres_max_9". batch_num_images (`torch.LongTensor`, *optional*): Number of images in each sample. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import torch >>> from transformers import LlavaOnevisionProcessor, LlavaOnevisionForConditionalGeneration >>> model = LlavaOnevisionForConditionalGeneration.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf", dtype="float16", device_map="cuda:0") >>> processor = LlavaOnevisionProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf") >>> conversation = [ ... { ... "role": "user", ... "content": [ ... {"type": "text", "text": "What is shown in this image?"}, ... {"type": "image"}, ... ], ... }, ... 
] >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) >>> image_file = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> raw_image = Image.open(requests.get(image_file, stream=True).raw) >>> inputs = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16) >>> output = model.generate(**inputs, max_new_tokens=20, do_sample=False) >>> processor.batch_decode(output, skip_special_tokens=True)[0] "user\n\nWhat is shown in this image?\nassistant\ncat" ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) vision_feature_select_strategy = ( vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy ) vision_aspect_ratio = ( vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio ) outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_sizes=image_sizes, image_sizes_videos=image_sizes_videos, vision_aspect_ratio=vision_aspect_ratio, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, batch_num_images=batch_num_images, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return LlavaOnevisionCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, video_hidden_states=outputs.video_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, image_sizes=None, pixel_values_videos=None, image_sizes_videos=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) if cache_position[0] == 0: # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model model_inputs["pixel_values"] = pixel_values model_inputs["image_sizes"] = image_sizes model_inputs["pixel_values_videos"] = 
pixel_values_videos model_inputs["image_sizes_videos"] = image_sizes_videos return model_inputs __all__ = [ "LlavaOnevisionImageProcessorFast", "LlavaOnevisionModel", "LlavaOnevisionForConditionalGeneration", "LlavaOnevisionPreTrainedModel", ]
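

# Shape sketch for `LlavaOnevisionModel.apply_pooling` (illustrative numbers only, assuming a
# vision tower configured with image_size=384 and patch_size=14; other configurations scale
# accordingly):
#
#     height = width = 384 // 14         # 27 patches per side -> 27 * 27 = 729 tokens per frame
#     scaled_shape = [ceil(27 / 2)] * 2  # bilinear pooling halves each side -> (14, 14)
#     tokens_per_frame = 14 * 14         # 196 video tokens per frame, before the newline token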
transformers/src/transformers/models/llava_onevision/modular_llava_onevision.py/0
{ "file_path": "transformers/src/transformers/models/llava_onevision/modular_llava_onevision.py", "repo_id": "transformers", "token_count": 15443 }
499
# coding=utf-8 # Copyright Studio Ousia and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LUKE configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class LukeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LukeModel`]. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LUKE [studio-ousia/luke-base](https://huggingface.co/studio-ousia/luke-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the LUKE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LukeModel`]. entity_vocab_size (`int`, *optional*, defaults to 500000): Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented by the `entity_ids` passed when calling [`LukeModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. entity_emb_size (`int`, *optional*, defaults to 256): The number of dimensions of the entity embedding. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LukeModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
use_entity_aware_attention (`bool`, *optional*, defaults to `True`): Whether or not the model should use the entity-aware self-attention mechanism proposed in [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention (Yamada et al.)](https://huggingface.co/papers/2010.01057). classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. Examples: ```python >>> from transformers import LukeConfig, LukeModel >>> # Initializing a LUKE configuration >>> configuration = LukeConfig() >>> # Initializing a model from the configuration >>> model = LukeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "luke" def __init__( self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): """Constructs LukeConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.entity_vocab_size = entity_vocab_size self.hidden_size = hidden_size self.entity_emb_size = entity_emb_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_entity_aware_attention = use_entity_aware_attention self.classifier_dropout = classifier_dropout __all__ = ["LukeConfig"]
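

# A configuration variant sketch (hypothetical values): `entity_emb_size` can be kept much
# smaller than `hidden_size`, in which case the model projects the entity embeddings up to
# the hidden size internally. Guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    small_entity_config = LukeConfig(entity_vocab_size=10000, entity_emb_size=64)
    print(small_entity_config.hidden_size, small_entity_config.entity_emb_size)  # 768 64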
transformers/src/transformers/models/luke/configuration_luke.py/0
{ "file_path": "transformers/src/transformers/models/luke/configuration_luke.py", "repo_id": "transformers", "token_count": 2482 }
500
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskFormer Swin Transformer model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MaskFormerSwinModel`]. It is used to instantiate
    a MaskFormer Swin model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Swin
    [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 96):
            Dimensionality of patch embedding.
        depths (`list[int]`, *optional*, defaults to `[2, 2, 6, 2]`):
            Depth of each layer in the Transformer encoder.
        num_heads (`list[int]`, *optional*, defaults to `[3, 6, 12, 24]`):
            Number of attention heads in each layer of the Transformer encoder.
        window_size (`int`, *optional*, defaults to 7):
            Size of windows.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias (`bool`, *optional*, defaults to True):
            Whether or not a learnable bias should be added to the queries, keys and values.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        use_absolute_embeddings (`bool`, *optional*, defaults to False):
            Whether or not to add absolute position embeddings to the patch embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output.
Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import MaskFormerSwinConfig, MaskFormerSwinModel >>> # Initializing a microsoft/swin-tiny-patch4-window7-224 style configuration >>> configuration = MaskFormerSwinConfig() >>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration >>> model = MaskFormerSwinModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "maskformer-swin" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) __all__ = ["MaskFormerSwinConfig"]
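

# Backbone sketch: with the default depths=[2, 2, 6, 2] the stage names are
# ["stem", "stage1", "stage2", "stage3", "stage4"] and the derived hidden size is
# embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768, matching the computation above.
# Guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    backbone_config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    print(backbone_config.hidden_size)  # 768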
transformers/src/transformers/models/maskformer/configuration_maskformer_swin.py/0
{ "file_path": "transformers/src/transformers/models/maskformer/configuration_maskformer_swin.py", "repo_id": "transformers", "token_count": 2762 }
501
# coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from shutil import copyfile
from typing import Optional

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]  # fmt: skip


class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Examples:

    ```python
    >>> from transformers import MBartTokenizerFast

    >>> tokenizer = MBartTokenizerFast.from_pretrained(
    ...     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    ... )
    >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
    ```"""

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: list[int] = []
    suffix_tokens: list[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self.lang_code_to_id = { lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES } self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def prepare_seq2seq_batch( self, src_texts: list[str], src_lang: str = "en_XX", tgt_texts: Optional[list[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(src_lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) __all__ = ["MBartTokenizerFast"]
transformers/src/transformers/models/mbart/tokenization_mbart_fast.py/0
{ "file_path": "transformers/src/transformers/models/mbart/tokenization_mbart_fast.py", "repo_id": "transformers", "token_count": 4695 }
502
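A minimal usage sketch for the MBart fast tokenizer above (not part of the original file). It illustrates the suffix layout `[eos, lang_code]` installed by `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`, and assumes network access to the `facebook/mbart-large-en-ro` checkpoint.

```python
# Illustrative sketch, assuming the facebook/mbart-large-en-ro checkpoint
# can be downloaded; it shows the suffix-only special-token layout that the
# post-processor installs for source and target sequences.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

# Source side: no prefix, suffix = [</s>, src_lang_code].
enc = tokenizer("UN Chief Says There Is No Military Solution in Syria")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"])[-2:])  # ['</s>', 'en_XX']

# Target side: `text_target` routes through `_switch_to_target_mode`, so the
# labels end with [</s>, tgt_lang_code] instead.
batch = tokenizer(
    "UN Chief Says There Is No Military Solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
)
print(tokenizer.convert_ids_to_tokens(batch["labels"])[-2:])  # ['</s>', 'ro_RO']
```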
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MLCD checkpoints from the original repository. URL: https://github.com/deepglint/unicom/tree/main """ import argparse import collections import os import re import numpy as np import requests import torch from PIL import Image from transformers import CLIPImageProcessor from ...utils import logging from .configuration_mlcd import MLCDVisionConfig from .modeling_mlcd import MLCDVisionModel logging.set_verbosity_info() logger = logging.get_logger(__name__) COMMON_CONFIG_PARAMS = { "mlcd-vit-bigG-patch14-336": { "hidden_size": 1664, "image_size": 336, "intermediate_size": 8192, "num_attention_heads": 16, "num_hidden_layers": 48, "patch_size": 14, "projection_dim": 1024, }, "mlcd-vit-bigG-patch14-448": { "hidden_size": 1664, "image_size": 448, "intermediate_size": 8192, "num_attention_heads": 16, "num_hidden_layers": 48, "patch_size": 14, "projection_dim": 1024, }, } MODEL_NAME_TO_CHECKPOINT_PATH = { # base checkpoints "mlcd-vit-bigG-patch14-336": "MLCD_ViT_bigG_14_336px_pytorch.pt", "mlcd-vit-bigG-patch14-448": "MLCD_ViT_bigG_14_448px_pytorch.pt", } # fmt: off EXPECTED_OUTPUTS = { "mlcd-vit-bigG-patch14-336": torch.tensor([ [-0.8921, -0.1069, 0.2989, 0.6018, -0.5892], [ 0.4093, -1.4592, 0.6048, -0.5147, -0.5929], [ 0.7796, -0.7133, -0.5649, -0.7843, -0.5548], [ 0.0041, 0.0286, 0.4310, -0.1403, -0.2399], [ 0.0839, 0.2152, -0.3822, -0.1668, -0.7886] ]), "mlcd-vit-bigG-patch14-448": torch.tensor([ [-0.8978, -0.1181, 0.4769, 0.4761, -0.5779], [ 0.2640, -2.6150, 0.4853, 0.5743, -1.1003], [ 0.3314, -0.3328, -0.4305, -0.1874, -0.7701], [-1.5174, -1.0238, -1.1854, 0.1749, -0.8786], [ 0.2323, -0.8346, -0.9680, -0.2951, 0.0867], ]), } # fmt: on # fmt: off ORIGINAL_TO_CONVERTED_KEY_MAPPING = { # Vision embeddings r"conv1.weight": r"vision_model.embeddings.patch_embedding.weight", r"class_embedding": r"vision_model.embeddings.class_embedding", r"vision_rotary_embedding": r"vision_model.vision_rotary_embedding", r"class_pos_emb": r"vision_model.class_pos_emb", # Vision encoder r"transformer.resblocks_(\d+).ln_1.weight": r"vision_model.encoder.layers.\1.layer_norm1.weight", r"transformer.resblocks_(\d+).ln_1.bias": r"vision_model.encoder.layers.\1.layer_norm1.bias", r"transformer.resblocks_(\d+).ln_2.weight": r"vision_model.encoder.layers.\1.layer_norm2.weight", r"transformer.resblocks_(\d+).ln_2.bias": r"vision_model.encoder.layers.\1.layer_norm2.bias", r"transformer.resblocks_(\d+).mlp.c_fc.weight": r"vision_model.encoder.layers.\1.mlp.fc1.weight", r"transformer.resblocks_(\d+).mlp.c_fc.bias": r"vision_model.encoder.layers.\1.mlp.fc1.bias", r"transformer.resblocks_(\d+).mlp.c_proj.weight": r"vision_model.encoder.layers.\1.mlp.fc2.weight", r"transformer.resblocks_(\d+).mlp.c_proj.bias": r"vision_model.encoder.layers.\1.mlp.fc2.bias", r"transformer.resblocks_(\d+).attn.(q|k|v|out)_proj.weight": r"vision_model.encoder.layers.\1.self_attn.\2_proj.weight", 
r"transformer.resblocks_(\d+).attn.(q|k|v|out)_proj.bias": r"vision_model.encoder.layers.\1.self_attn.\2_proj.bias", # Vision norm r"ln_post.weight": r"vision_model.post_layernorm.weight", r"ln_post.bias": r"vision_model.post_layernorm.bias", r"ln_pre.weight": r"vision_model.pre_layernorm.weight", r"ln_pre.bias": r"vision_model.pre_layernorm.bias", } # fmt: on # -------------------------------------------------------------------------------------------- # Model objects: configuration, image processor # -------------------------------------------------------------------------------------------- def get_mlcd_config(model_name: str) -> MLCDVisionConfig: """ Create a configuration for the MLCD model based on the model name. """ assert model_name in COMMON_CONFIG_PARAMS, f"Model {model_name} not found in the list of COMMON_CONFIG_PARAMS." config_params = COMMON_CONFIG_PARAMS[model_name] config = MLCDVisionConfig( hidden_size=config_params["hidden_size"], image_size=config_params["image_size"], intermediate_size=config_params["intermediate_size"], num_attention_heads=config_params["num_attention_heads"], num_hidden_layers=config_params["num_hidden_layers"], patch_size=config_params["patch_size"], projection_dim=config_params["projection_dim"], ) return config def get_mlcd_image_processor(model_name: str) -> CLIPImageProcessor: """ Create an image processor for the MLCD model based on the model name. """ assert model_name in COMMON_CONFIG_PARAMS, f"Model {model_name} not found in the list of COMMON_CONFIG_PARAMS." config_params = COMMON_CONFIG_PARAMS[model_name] image_processor = CLIPImageProcessor( do_center_crop=True, do_normalize=True, do_resize=True, feature_extractor_type="CLIPFeatureExtractor", image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=config_params["image_size"], crop_size=config_params["image_size"], ) return image_processor # -------------------------------------------------------------------------------------------- # Helper functions for state dict conversion # -------------------------------------------------------------------------------------------- def flatten_nested_dict(params: dict, parent_key: str = "", sep: str = ".") -> dict: """ Flatten a nested original checkpoint dictionary into a flat dictionary. """ items = [] for k, v in params.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_nested_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) def split_resblocks_layers(state_dict: dict) -> dict: """ Split the resblocks weight into layers. In some cases they are concatenated in the original checkpoints. """ # Make shallow copy state_dict = state_dict.copy() # Split resblocks weight into layers keys = list(state_dict.keys()) for key in keys: if ".resblocks." in key: weight = state_dict.pop(key) for i, weight_i in enumerate(weight): new_name = key.replace("resblocks", f"resblocks_{i}") state_dict[new_name] = weight_i return state_dict def chunk_qkv_for_attn(state_dict: dict) -> dict: """ Chunk the q/k/v weights and biases for the attention layers. """ # Make shallow copy state_dict = state_dict.copy() # Read and process q/k/v weights and biases keys = list(state_dict.keys()) for key in keys: if ".in_proj." 
in key:
            weight = state_dict.pop(key)
            qkv_weights = weight.chunk(3, dim=0)
            for name, weight_i in zip(["q_proj", "k_proj", "v_proj"], qkv_weights):
                new_name = key.replace("in_proj", name)
                state_dict[new_name] = weight_i
    return state_dict


def convert_old_keys_to_new_keys(state_dict_keys: list) -> dict:
    """
    This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings.
    """
    output_dict = {}
    if state_dict_keys is not None:
        old_text = "\n".join(state_dict_keys)
        new_text = old_text
        for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
            if replacement is None:
                new_text = re.sub(pattern, "", new_text)  # an empty line
                continue
            new_text = re.sub(pattern, replacement, new_text)
        output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
    return output_dict


# --------------------------------------------------------------------------------------------
# Convert model
# --------------------------------------------------------------------------------------------


@torch.no_grad()
def convert_mlcd_checkpoint(model_name, input_dir, output_dir, verify_hidden_state=True, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MLCD structure.
    """
    # Define MLCD configuration
    config = get_mlcd_config(model_name)

    checkpoint = MODEL_NAME_TO_CHECKPOINT_PATH[model_name]
    checkpoint_path = os.path.join(input_dir, checkpoint)
    assert os.path.exists(checkpoint_path), f"Checkpoint path ({checkpoint_path}) not found."

    # Load original checkpoint
    print(f"Loading checkpoint from {checkpoint_path}...")
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Flatten nested dictionary
    print("Flattening nested dictionary...")
    state_dict = {k.replace("_orig_mod.", ""): v for k, v in state_dict.items()}
    if "positional_embedding" in state_dict:
        state_dict.pop("positional_embedding")
    state_dict = flatten_nested_dict(state_dict)
    state_dict = split_resblocks_layers(state_dict)
    state_dict = chunk_qkv_for_attn(state_dict)

    # Rename and transform weights
    print("Renaming and transforming weights...")
    original_keys = list(state_dict.keys())
    hf_keys = convert_old_keys_to_new_keys(original_keys)
    new_state_dict = {}
    for original_key in original_keys:
        new_key = hf_keys[original_key]
        parameter = state_dict.pop(original_key)
        new_state_dict[new_key] = torch.from_numpy(parameter)

    # load HuggingFace model
    print("Loading HuggingFace model...")
    model = MLCDVisionModel(config).eval()
    model.load_state_dict(new_state_dict)

    # Create processor
    print("Creating processor...")
    image_processor = get_mlcd_image_processor(model_name)

    # Verify hidden state
    if verify_hidden_state:
        print(f"Verifying hidden state for {model_name}...")
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        pixel_values = image_processor(image, return_tensors="pt")["pixel_values"]
        last_hidden_state = model(pixel_values, output_hidden_states=True).last_hidden_state[0, :5, :5]
        expected_hidden_state = EXPECTED_OUTPUTS[model_name]
        np.testing.assert_allclose(last_hidden_state.cpu().numpy(), expected_hidden_state.numpy(), atol=1e-4)

    # Save model
    if output_dir is not None:
        dst_dir = os.path.join(output_dir, model_name)
        print(f"Saving model {model_name} to {dst_dir}...")
        model.save_pretrained(dst_dir)
        print(f"Saving processor to {dst_dir}...")
        image_processor.save_pretrained(dst_dir)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to the HuggingFace Hub...")
        model.push_to_hub(f"deepglint-hf/{model_name}", private=True)
image_processor.push_to_hub(f"deepglint-hf/{model_name}", private=True) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mlcd-vit-bigG-patch14-448", type=str, choices=MODEL_NAME_TO_CHECKPOINT_PATH.keys(), help="Name of the model you'd like to convert.", ) parser.add_argument( "--input_dir", default="mlcd/original", help="Location of MLCD original weights", ) parser.add_argument( "--output_dir", default="mlcd/checkpoint", help="Location to write HF model and processor", ) parser.add_argument( "--verify_hidden_state", action="store_true", help="Whether to verify hidden_state against the original implementation.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_mlcd_checkpoint( args.model_name, args.input_dir, args.output_dir, args.verify_hidden_state, args.push_to_hub )
transformers/src/transformers/models/mlcd/convert_mlcd_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/mlcd/convert_mlcd_weights_to_hf.py", "repo_id": "transformers", "token_count": 5766 }
503
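The `convert_old_keys_to_new_keys` helper above renames every checkpoint key in a single pass by joining all keys with newlines and running each regex once over the whole blob. Below is a self-contained sketch of that trick; the two-entry mapping and sample keys are made up for illustration, not the converter's full table.

```python
# Stand-alone sketch of the bulk-rename trick: join keys into one string,
# apply every regex once, then zip old and new names back together.
import re

mapping = {
    r"conv1.weight": r"vision_model.embeddings.patch_embedding.weight",
    r"transformer.resblocks_(\d+).ln_1.weight": r"vision_model.encoder.layers.\1.layer_norm1.weight",
}

old_keys = ["conv1.weight", "transformer.resblocks_0.ln_1.weight"]
old_text = "\n".join(old_keys)
new_text = old_text
for pattern, replacement in mapping.items():
    new_text = re.sub(pattern, replacement, new_text)

renamed = dict(zip(old_text.split("\n"), new_text.split("\n")))
print(renamed["transformer.resblocks_0.ln_1.weight"])
# vision_model.encoder.layers.0.layer_norm1.weight
```

Running the substitutions over one concatenated string instead of key-by-key keeps the regex compilation and scanning cost independent of how many keys each pattern touches.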
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MobileNetV2 model configuration""" from collections import OrderedDict from collections.abc import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class MobileNetV2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileNetV2Model`]. It is used to instantiate a MobileNetV2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileNetV2 [google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. depth_multiplier (`float`, *optional*, defaults to 1.0): Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32 channels. This is sometimes also called "alpha" or "width multiplier". depth_divisible_by (`int`, *optional*, defaults to 8): The number of channels in each layer will always be a multiple of this number. min_depth (`int`, *optional*, defaults to 8): All layers will have at least this many channels. expand_ratio (`float`, *optional*, defaults to 6.0): The number of output channels of the first layer in each block is input channels times expansion ratio. output_stride (`int`, *optional*, defaults to 32): The ratio between the spatial resolution of the input and output feature maps. By default the model reduces the input dimensions by a factor of 32. If `output_stride` is 8 or 16, the model uses dilated convolutions on the depthwise layers instead of regular convolutions, so that the feature maps never become more than 8x or 16x smaller than the input image. first_layer_is_expansion (`bool`, *optional*, defaults to `True`): True if the very first convolution layer is also the expansion layer for the first expansion block. finegrained_output (`bool`, *optional*, defaults to `True`): If true, the number of output channels in the final convolution layer will stay large (1280) even if `depth_multiplier` is less than 1. hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`): The non-linear activation function (function or string) in the Transformer encoder and convolution layers. tf_padding (`bool`, *optional*, defaults to `True`): Whether to use TensorFlow padding rules on the convolution layers. classifier_dropout_prob (`float`, *optional*, defaults to 0.8): The dropout ratio for attached classifiers. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 0.001): The epsilon used by the layer normalization layers. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. Example: ```python >>> from transformers import MobileNetV2Config, MobileNetV2Model >>> # Initializing a "mobilenet_v2_1.0_224" style configuration >>> configuration = MobileNetV2Config() >>> # Initializing a model from the "mobilenet_v2_1.0_224" style configuration >>> model = MobileNetV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mobilenet_v2" def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6.0, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs, ): super().__init__(**kwargs) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero.") self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.depth_divisible_by = depth_divisible_by self.min_depth = min_depth self.expand_ratio = expand_ratio self.output_stride = output_stride self.first_layer_is_expansion = first_layer_is_expansion self.finegrained_output = finegrained_output self.hidden_act = hidden_act self.tf_padding = tf_padding self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.semantic_loss_ignore_index = semantic_loss_ignore_index class MobileNetV2OnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})]) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})]) @property def atol_for_validation(self) -> float: return 1e-4 __all__ = ["MobileNetV2Config", "MobileNetV2OnnxConfig"]
transformers/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py/0
{ "file_path": "transformers/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py", "repo_id": "transformers", "token_count": 2489 }
504
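A short, illustrative sketch of the configuration knobs documented above; the values are arbitrary and only meant to show the API shape and the constructor's guard on `depth_multiplier`.

```python
# Illustrative sketch only; values are arbitrary.
from transformers import MobileNetV2Config, MobileNetV2Model

# depth_multiplier < 1.0 slims every layer's channel count; output_stride=16
# switches the later depthwise convolutions to dilated ones, per the docstring.
config = MobileNetV2Config(depth_multiplier=0.5, output_stride=16)
model = MobileNetV2Model(config)

# The constructor rejects non-positive width multipliers.
try:
    MobileNetV2Config(depth_multiplier=0.0)
except ValueError as err:
    print(err)  # depth_multiplier must be greater than zero.
```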
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MobileViTV2 checkpoints from the ml-cvnets library.""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTV2Config, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_orig_config_file(orig_cfg_file): print("Loading config file...") def flatten_yaml_as_dict(d, parent_key="", sep="."): items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) config = argparse.Namespace() with open(orig_cfg_file, "r") as yaml_file: try: cfg = yaml.load(yaml_file, Loader=yaml.FullLoader) flat_cfg = flatten_yaml_as_dict(cfg) for k, v in flat_cfg.items(): setattr(config, k, v) except yaml.YAMLError as exc: logger.error(f"Error while loading config file: {orig_cfg_file}. 
Error message: {str(exc)}") return config def get_mobilevitv2_config(task_name, orig_cfg_file): config = MobileViTV2Config() is_segmentation_model = False # dataset if task_name.startswith("imagenet1k_"): config.num_labels = 1000 if int(task_name.strip().split("_")[-1]) == 384: config.image_size = 384 else: config.image_size = 256 filename = "imagenet-1k-id2label.json" elif task_name.startswith("imagenet21k_to_1k_"): config.num_labels = 21000 if int(task_name.strip().split("_")[-1]) == 384: config.image_size = 384 else: config.image_size = 256 filename = "imagenet-22k-id2label.json" elif task_name.startswith("ade20k_"): config.num_labels = 151 config.image_size = 512 filename = "ade20k-id2label.json" is_segmentation_model = True elif task_name.startswith("voc_"): config.num_labels = 21 config.image_size = 512 filename = "pascal-voc-id2label.json" is_segmentation_model = True # orig_config orig_config = load_orig_config_file(orig_cfg_file) assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model" config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0) assert getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d", ( "Norm layers other than layer_norm_2d is not supported" ) config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish") # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16) if "_deeplabv3" in task_name: config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36]) config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512) config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1) # id2label repo_id = "huggingface/label-files" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def create_rename_keys(state_dict, base_model=False): if base_model: model_prefix = "" else: model_prefix = "mobilevitv2." rename_keys = [] for k in state_dict: if k[:8] == "encoder.": k_new = k[8:] else: k_new = k if ".block." in k: k_new = k_new.replace(".block.", ".") if ".conv." in k: k_new = k_new.replace(".conv.", ".convolution.") if ".norm." in k: k_new = k_new.replace(".norm.", ".normalization.") if "conv_1." in k: k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.") for i in [1, 2]: if f"layer_{i}." in k: k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i - 1}.layer.") if ".exp_1x1." in k: k_new = k_new.replace(".exp_1x1.", ".expand_1x1.") if ".red_1x1." in k: k_new = k_new.replace(".red_1x1.", ".reduce_1x1.") for i in [3, 4, 5]: if f"layer_{i}.0." in k: k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i - 1}.downsampling_layer.") if f"layer_{i}.1.local_rep.0." in k: k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i - 1}.conv_kxk.") if f"layer_{i}.1.local_rep.1." 
in k: k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i - 1}.conv_1x1.") for i in [3, 4, 5]: if i == 3: j_in = [0, 1] elif i == 4: j_in = [0, 1, 2, 3] elif i == 5: j_in = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: k_new = k_new.replace( f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i - 1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j + 1}." in k: k_new = k_new.replace( f"layer_{i}.1.global_rep.{j + 1}.", f"{model_prefix}encoder.layer.{i - 1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: k_new = k_new.replace( f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i - 1}.conv_projection." ) if "pre_norm_attn.0." in k: k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.") if "pre_norm_attn.1." in k: k_new = k_new.replace("pre_norm_attn.1.", "attention.") if "pre_norm_ffn.0." in k: k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.") if "pre_norm_ffn.1." in k: k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.") if "pre_norm_ffn.3." in k: k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.") if "classifier.1." in k: k_new = k_new.replace("classifier.1.", "classifier.") if "seg_head." in k: k_new = k_new.replace("seg_head.", "segmentation_head.") if ".aspp_layer." in k: k_new = k_new.replace(".aspp_layer.", ".") if ".aspp_pool." in k: k_new = k_new.replace(".aspp_pool.", ".") rename_keys.append((k, k_new)) return rename_keys def remove_unused_keys(state_dict): """remove unused keys (e.g.: seg_head.aux_head)""" keys_to_ignore = [] for k in state_dict: if k.startswith("seg_head.aux_head."): keys_to_ignore.append(k) for k in keys_to_ignore: state_dict.pop(k, None) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our MobileViTV2 structure. 
""" config = get_mobilevitv2_config(task_name, orig_config_path) # load original state_dict checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True) # load huggingface model if task_name.startswith("ade20k_") or task_name.startswith("voc_"): model = MobileViTV2ForSemanticSegmentation(config).eval() base_model = False else: model = MobileViTV2ForImageClassification(config).eval() base_model = False # remove and rename some keys of load the original model state_dict = checkpoint remove_unused_keys(state_dict) rename_keys = create_rename_keys(state_dict, base_model=base_model) for rename_key_src, rename_key_dest in rename_keys: rename_key(state_dict, rename_key_src, rename_key_dest) # load modified state_dict model.load_state_dict(state_dict) # Check outputs on an image, prepared by MobileViTImageProcessor image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32) encoding = image_processor(images=prepare_img(), return_tensors="pt") outputs = model(**encoding) # verify classification model if task_name.startswith("imagenet"): logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0: # expected_logits for base variant expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]) assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {task_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " """ Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 """ ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument( "--orig_config_path", required=True, type=str, help="Path to the original config file. yaml.load will be used to load the file, please be wary of which file you're loading.", ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_mobilevitv2_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
transformers/src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py", "repo_id": "transformers", "token_count": 5971 }
505
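The converter's `flatten_yaml_as_dict` closure is the piece that turns the nested ml-cvnets YAML into the dotted attribute names later queried with `getattr`, e.g. `"model.classification.mitv2.width_multiplier"`. A stand-alone sketch of the same recursion, with a toy input:

```python
# Stand-alone sketch of the flattening recursion used by the converter.
import collections.abc


def flatten_as_dict(d, parent_key="", sep="."):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            # Nested mapping: recurse and splice its flattened items in.
            items.extend(flatten_as_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


cfg = {"model": {"classification": {"mitv2": {"width_multiplier": 2.0}}}}
print(flatten_as_dict(cfg))
# {'model.classification.mitv2.width_multiplier': 2.0}
```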
# coding=utf-8 # Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Moshi model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) class MoshiDepthConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MoshiDepthDecoder`]. It is used to instantiate a Moshi depth decoder model according to the specified arguments, defining the Moshi depth decoder config. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the MoshiDepthDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MoshiDepthDecoder`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer of the depth decoder. input_size (`int`, *optional*, defaults to 4096): Dimensionality of the input hidden states. Used to connect the main decoder to the depth decoder. num_hidden_layers (`int`, *optional*, defaults to 6): Number of depth decoder layers. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the depth decoder block. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. audio_vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the audio part of model. Defines the number of different tokens that can be represented by the `audio_codes` passed when calling the Moshi models. max_position_embeddings (`int`, *optional*, defaults to 9): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the depth decoder. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        sliding_window (`int`, *optional*, defaults to 8):
            Sliding window attention window size. If not specified, will default to `8`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        ffn_dim (`int`, *optional*, defaults to 5632):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the depth decoder block. Must be
            even.
        rms_norm_eps (`float`, *optional*, defaults to 1e-08):
            The epsilon used by the rms normalization layers.
        num_codebooks (`int`, *optional*, defaults to 8):
            The number of audio codebooks for each audio channel.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the word embeddings.
        kwargs (*optional*):
            Dictionary of keyword arguments. Notably:
                - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object
                  that defines the audio encoder config.

    Example:

    ```python
    >>> from transformers import (
    ...     MoshiDepthConfig,
    ...     MoshiDepthDecoder,
    ... )

    >>> configuration = MoshiDepthConfig()

    >>> # Initializing a MoshiDepthDecoder (with random weights) from the kmhf/hf-moshiko style configuration
    >>> model = MoshiDepthDecoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "moshi_depth"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=1024,
        input_size=4096,
        num_hidden_layers=6,
        num_attention_heads=16,
        num_key_value_heads=None,
        audio_vocab_size=2048,
        max_position_embeddings=9,
        hidden_act="silu",
        head_dim=None,
        initializer_range=0.02,
        use_cache=True,
        sliding_window=8,
        attention_dropout=0.0,
        ffn_dim=5632,
        rms_norm_eps=1e-8,
        num_codebooks=8,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.head_dim = head_dim or hidden_size // num_attention_heads
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.sliding_window = sliding_window
        self.attention_dropout = attention_dropout
        if ffn_dim % 2 == 1:
            raise ValueError(f"`ffn_dim={ffn_dim}` must be even.")
        self.ffn_dim = ffn_dim
        self.rms_norm_eps = rms_norm_eps
        self.num_codebooks = num_codebooks
        self.audio_vocab_size = audio_vocab_size
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class MoshiConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MoshiModel`]. It is used to instantiate a Moshi
    model according to the specified arguments, defining the audio encoder, Moshi depth decoder and Moshi decoder
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Moshiko
    model, e.g. [kmhf/hf-moshiko](https://huggingface.co/kmhf/hf-moshiko).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MoshiDecoder model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`MoshiDecoder`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the layers and the pooler layer of the main decoder.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of decoder layers.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the main decoder block.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        audio_vocab_size (`int`, *optional*):
            Vocabulary size of the audio part of model. Defines the number of different tokens that can be
            represented by the `audio_codes` passed when calling the Moshi models.
        max_position_embeddings (`int`, *optional*, defaults to 3000):
            The maximum sequence length that this model might ever be used with. Typically, set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
            The attention head dimension.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        sliding_window (`int`, *optional*, defaults to 3000):
            Sliding window attention window size. If not specified, will default to `3000`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        ffn_dim (`int`, *optional*, defaults to 22528):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the main decoder block. Must be
            even.
        rms_norm_eps (`float`, *optional*, defaults to 1e-08):
            The epsilon used by the rms normalization layers.
        num_codebooks (`int`, *optional*, defaults to 8):
            The number of audio codebooks for each audio channel.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the word embeddings.
        kwargs (*optional*):
            Dictionary of keyword arguments. Notably:
                - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object
                  that defines the audio encoder config.
                - **depth_decoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object
                  that defines the depth decoder config.

    Example:

    ```python
    >>> from transformers import (
    ...     MoshiConfig,
    ...     MoshiForConditionalGeneration,
    ...
) >>> configuration = MoshiConfig() >>> # Initializing a MoshiForConditionalGeneration (with random weights) from the kmhf/hf-moshiko style configuration >>> model = MoshiForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # Saving the model, including its configuration >>> model.save_pretrained("kmhf/hf-moshiko") >>> # loading model and config from pretrained folder >>> moshi_config = MoshiConfig.from_pretrained("kmhf/hf-moshiko") >>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko", config=moshi_config) ```""" model_type = "moshi" keys_to_ignore_at_inference = ["past_key_values"] sub_configs = {"audio_encoder_config": AutoConfig, "depth_decoder_config": MoshiDepthConfig} def __init__( self, vocab_size=32000, hidden_size=4096, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, audio_vocab_size=None, max_position_embeddings=3000, rope_theta=10000.0, hidden_act="silu", head_dim=None, initializer_range=0.02, use_cache=True, sliding_window=3000, attention_dropout=0.0, ffn_dim=22528, rms_norm_eps=1e-8, num_codebooks=8, tie_word_embeddings=False, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads self.max_position_embeddings = max_position_embeddings self.rope_theta = rope_theta self.hidden_act = hidden_act self.head_dim = head_dim or hidden_size // num_attention_heads self.initializer_range = initializer_range self.use_cache = use_cache self.sliding_window = sliding_window self.attention_dropout = attention_dropout if ffn_dim % 2 == 1: raise ValueError(f"`ffn_dim={ffn_dim}` must be even.") self.ffn_dim = ffn_dim self.rms_norm_eps = rms_norm_eps self.num_codebooks = num_codebooks audio_encoder_config = kwargs.pop("audio_encoder_config", {}) audio_encoder_model_type = audio_encoder_config.pop("model_type", "mimi") self.audio_encoder_config = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) if self.num_codebooks > self.audio_encoder_config.num_codebooks: raise ValueError( f"`num_codebooks={num_codebooks}` is greater than the maximum number of codebooks that the audio encoder can deal with ({self.audio_encoder_config.num_codebooks}). Please lower it." ) self.audio_vocab_size = ( self.audio_encoder_config.codebook_size if audio_vocab_size is None else audio_vocab_size ) depth_decoder_config = kwargs.pop("depth_decoder_config", {}) depth_decoder_config.update( { "audio_vocab_size": self.audio_vocab_size, "input_size": hidden_size, "vocab_size": vocab_size, "num_codebooks": num_codebooks, } ) self.depth_decoder_config = MoshiDepthConfig(**depth_decoder_config) super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) @property def sampling_rate(self): return self.audio_encoder_config.sampling_rate @classmethod def from_audio_encoder_config( cls, audio_encoder_config: PretrainedConfig, **kwargs, ): r""" Instantiate a [`MoshiConfig`] (or a derived class) from an audio encoder configuration. Returns: [`MoshiConfig`]: An instance of a configuration object """ return cls( audio_encoder_config=audio_encoder_config.to_dict(), **kwargs, ) __all__ = ["MoshiConfig", "MoshiDepthConfig"]
transformers/src/transformers/models/moshi/configuration_moshi.py/0
{ "file_path": "transformers/src/transformers/models/moshi/configuration_moshi.py", "repo_id": "transformers", "token_count": 6212 }
506
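A minimal sketch of the composite-config wiring implemented above: the constructor forces the depth decoder's `input_size` to the main decoder's `hidden_size` and shares the vocab and codebook settings. It assumes the default Mimi audio-codec config is available in the installed transformers version; the values below are illustrative.

```python
# Minimal sketch, assuming AutoConfig can build the default "mimi" audio
# encoder config; values are illustrative.
from transformers import MoshiConfig

config = MoshiConfig(hidden_size=1024, num_codebooks=8)

# The depth decoder is wired to the main decoder: its input_size is forced
# to the main decoder's hidden_size, and codebook counts are kept in sync.
print(config.depth_decoder_config.input_size)     # 1024
print(config.depth_decoder_config.num_codebooks)  # 8
```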
# coding=utf-8 # Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Musicgen Melody model.""" import copy import inspect import math import random from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable, Optional, Union import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import ( ClassifierFreeGuidanceLogitsProcessor, GenerationConfig, GenerationMixin, GenerationMode, LogitsProcessorList, StoppingCriteriaList, ) from ...modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from ...modeling_flash_attention_utils import ( FlashAttentionKwargs, ) from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from ..auto.configuration_auto import AutoConfig from ..auto.modeling_auto import AutoModel, AutoModelForTextEncoding from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig if is_torch_flex_attn_available(): from ...integrations.flex_attention import make_flex_block_causal_mask if TYPE_CHECKING: from ...generation.streamers import BaseStreamer logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for Musicgen Melody autoregressive outputs. """ ) class MusicgenMelodyOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of conditional hidden-states representing the concatenation of the projected text encoder output and the projected audio encoder output. Used as a conditional signal. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[torch.FloatTensor] = None # Copied from transformers.models.musicgen.modeling_musicgen.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ # transpose to get (bsz, num_codebooks, seq_len) input_ids = input_ids.transpose(1, 2) shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenSinusoidalPositionalEmbedding with Musicgen->MusicgenMelody class MusicgenMelodySinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int): super().__init__() self.embedding_dim = embedding_dim self.make_weights(num_positions, embedding_dim) def make_weights(self, num_embeddings: int, embedding_dim: int): emb_weights = self.get_embedding(num_embeddings, embedding_dim) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer("weights", emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) return emb.to(torch.get_default_dtype()) @torch.no_grad() # Ignore copy def forward(self, inputs_embeds: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len, _ = inputs_embeds.size() # Create the position ids from the input token ids. 
position_ids = (torch.arange(seq_len) + past_key_values_length).to(inputs_embeds.device) # expand embeddings if needed if seq_len > self.weights.size(0): self.make_weights(seq_len, self.embedding_dim) return self.weights.index_select(0, position_ids.view(-1)).detach() # Copied from transformers.models.bart.modeling_bart.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): if scaling is None: scaling = query.size(-1) ** -0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenAttention with Musicgen->MusicgenMelody class MusicgenMelodyAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: Optional[float] = 0.0, is_decoder: Optional[bool] = False, bias: Optional[bool] = True, is_causal: Optional[bool] = False, config: Optional[MusicgenMelodyConfig] = None, layer_idx: Optional[int] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
        )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.layer_idx = layer_idx

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.Tensor] = None,
        # TODO: we need a refactor so that the different attention modules can get their specific kwargs
        # at the moment, encoder, decoder, and encoder-decoder attention kwargs are mixed here
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        # determine input shapes
        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len

        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)

        # get query proj
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)

        # default to False so the flag is always defined, even for caches that do not track updates
        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_layer from cache
                    curr_past_key_value = past_key_values.cross_attention_cache
                else:
                    curr_past_key_value = past_key_values.self_attention_cache
            else:
                curr_past_key_value = past_key_values

        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse cached k,v cross-attention states
            key_states = curr_past_key_value.layers[self.layer_idx].keys
            value_states = curr_past_key_value.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
            value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention:
                    past_key_values.is_updated[self.layer_idx] = True

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            head_mask=layer_head_mask,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights


class MusicgenMelodyDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MusicgenMelodyDecoderConfig, layer_idx=None):
        super().__init__()
        self.embed_dim = config.hidden_size

        self.self_attn = MusicgenMelodyAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            bias=False,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)

        self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False)
        self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        cache_position: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(attention_heads,)`.
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
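
        Example (an illustrative sketch added for clarity, not part of the upstream docstring; it uses the
        default `MusicgenMelodyDecoderConfig` values and arbitrary shapes):

        ```python
        >>> import torch
        >>> from transformers import MusicgenMelodyDecoderConfig

        >>> config = MusicgenMelodyDecoderConfig()
        >>> layer = MusicgenMelodyDecoderLayer(config, layer_idx=0)
        >>> hidden_states = torch.randn(2, 10, config.hidden_size)
        >>> hidden_states, self_attn_weights = layer(hidden_states, output_attentions=True)
        ```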
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights @auto_docstring # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenPreTrainedModel with Musicgen->MusicgenMelody class MusicgenMelodyPreTrainedModel(PreTrainedModel): config: MusicgenMelodyDecoderConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MusicgenMelodyDecoderLayer", "MusicgenMelodyAttention"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): std = self.config.initializer_factor if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoder with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody class MusicgenMelodyDecoder(MusicgenMelodyPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`MusicgenMelodyDecoderLayer`] """ def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.max_target_positions = config.max_position_embeddings self.d_model = config.hidden_size self.num_codebooks = config.num_codebooks self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 embed_dim = config.vocab_size + 1 self.embed_tokens = nn.ModuleList( [nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)] ) self.embed_positions = MusicgenMelodySinusoidalPositionalEmbedding( config.max_position_embeddings, config.hidden_size, ) self.layers = nn.ModuleList( [MusicgenMelodyDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)] ) self.layer_norm = nn.LayerNorm(config.hidden_size) self.attn_implementation = config._attn_implementation self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @auto_docstring # Ignore copy def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
                [What are attention masks?](../glossary#attention-mask)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            # (bsz * codebooks, seq_len) -> (bsz, codebooks, seq_len)
            input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1])
            bsz, num_codebooks, seq_len = input.shape
            input_shape = (bsz, seq_len)
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1:]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())

        if use_cache and isinstance(past_key_values, tuple):
            logger.warning_once(
                "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
                "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
                "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
            )
            past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)

        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0

        if inputs_embeds is None:
            inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)])

        if encoder_hidden_states is not None:
            # take care of attention masks
            if encoder_attention_mask is not None and attention_mask is None:
                attention_mask = torch.ones(inputs_embeds.shape[:2], device=inputs_embeds.device)

            if attention_mask is not None:
                if encoder_attention_mask is None:
                    encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=attention_mask.device)
                attention_mask = torch.cat([encoder_attention_mask, attention_mask], dim=1)

            # fuse encoder_hidden_states and inputs_embeds
            inputs_embeds = torch.cat([encoder_hidden_states, inputs_embeds], dim=1)
            input_shape = inputs_embeds.size()[:-1]

        attention_mask = self._update_causal_mask(
            attention_mask,
            input_shape,
            inputs_embeds,
            past_key_values_length,
        )

        # embed positions
        positions = self.embed_positions(inputs_embeds, past_key_values_length)

        hidden_states = inputs_embeds + positions.to(inputs_embeds.device)

        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The `head_mask` should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, ) def _update_causal_mask( self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int, ): if self.config._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, inputs_embeds, past_key_values_length, ) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) # Other attention flavors support in-built causal (when `mask is None`) # while we need to create our specific block mask regardless elif attention_mask is None: attention_mask = make_flex_block_causal_mask( torch.ones( size=(input_shape), device=inputs_embeds.device, ) ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) return attention_mask # Ignore copy def _update_cross_attn_mask( self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, ): # MusicgenMelody doesn't apply cross attention, hence it's ignored here # and only exists to not confuse any copy checks pass @auto_docstring # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenModel with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody class MusicgenMelodyModel(MusicgenMelodyPreTrainedModel): def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) self.decoder = MusicgenMelodyDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_decoder(self): return self.decoder @auto_docstring # Ignore copy def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: 
Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs return BaseModelOutputWithPast( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The Musicgen Melody decoder model with a language modelling head on top. 
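
    Example (an illustrative sketch added for clarity, not taken from the upstream documentation; it uses a
    randomly initialized model with the default `MusicgenMelodyDecoderConfig` values):

    ```python
    >>> import torch
    >>> from transformers import MusicgenMelodyDecoderConfig, MusicgenMelodyForCausalLM

    >>> config = MusicgenMelodyDecoderConfig()
    >>> model = MusicgenMelodyForCausalLM(config)
    >>> # input ids are flattened across codebooks: (batch_size * num_codebooks, seq_len)
    >>> input_ids = torch.zeros(config.num_codebooks, 1, dtype=torch.long)
    >>> logits = model(input_ids).logits  # (batch_size * num_codebooks, seq_len, vocab_size)
    ```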
""" ) # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForCausalLM with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody,MusicGen->Musicgen Melody class MusicgenMelodyForCausalLM(MusicgenMelodyPreTrainedModel, GenerationMixin): def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) self.model = MusicgenMelodyModel(config) self.num_codebooks = config.num_codebooks self.lm_heads = nn.ModuleList( [nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)] ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_heads def set_output_embeddings(self, new_embeddings): self.lm_heads = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @auto_docstring # Ignore copy def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, MusicgenMelodyOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are input IDs?](../glossary#input-ids) <Tip warning={true}> The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `input_ids`. </Tip> encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing attention on conditional hidden states. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
            you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels
            set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (labels is not None) and (input_ids is None and inputs_embeds is None):
            input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.bos_token_id)

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]

        lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1)

        loss = None
        if labels is not None:
            # since encoder hidden states have been concatenated to the decoder hidden states,
            # we take the last timestamps corresponding to labels
            logits = lm_logits[:, :, -labels.shape[1] :]

            loss_fct = CrossEntropyLoss()
            loss = torch.zeros([], device=self.device)

            # per codebook cross-entropy
            # ref: https://github.com/facebookresearch/audiocraft/blob/69fea8b290ad1b4b40d28f92d1dfc0ab01dbab85/audiocraft/solvers/musicgen.py#L242-L243
            # -100 labels are ignored
            labels = labels.masked_fill(labels == self.config.pad_token_id, -100)

            # per codebook cross-entropy
            for codebook in range(self.config.num_codebooks):
                codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1])
                codebook_labels = labels[..., codebook].contiguous().view(-1)
                loss += loss_fct(codebook_logits, codebook_labels)

            loss = loss / self.config.num_codebooks

        # (bsz, num_codebooks, seq_len, vocab_size) -> (bsz * num_codebooks, seq_len, vocab_size)
        lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:])

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MusicgenMelodyOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Ignore copy
    def prepare_inputs_for_generation(
        self,
        input_ids,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        past_key_values=None,
        use_cache=True,
        delay_pattern_mask=None,
        guidance_scale=None,
        **kwargs,
    ):
        # Overwritten -- MusicGen has custom processing
        if delay_pattern_mask is None:
            input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
                input_ids,
                pad_token_id=self.generation_config.pad_token_id,
                max_length=self.generation_config.max_length,
            )

        # apply the delay pattern mask
        input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask)

        if guidance_scale is not None and guidance_scale > 1:
            # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these
            # before sampling)
            input_ids = input_ids.repeat((2, 1))
            if attention_mask is not None:
                attention_mask = attention_mask.repeat((2, 1))

            if encoder_hidden_states is not None:
                encoder_hidden_states = torch.concatenate(
                    [encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0
                )

            if encoder_attention_mask is not None:
                # the tensors to concatenate must be passed as a sequence
                encoder_attention_mask = torch.concatenate(
                    [encoder_attention_mask, torch.zeros_like(encoder_attention_mask)], dim=0
                )

        if past_key_values is not None:
            input_ids = input_ids[:,
-1:] # we only want to use conditional signal in the 1st generation step but keeping the attention mask encoder_hidden_states = None return { "input_ids": input_ids, "attention_mask": attention_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "head_mask": head_mask, "past_key_values": past_key_values, "use_cache": use_cache, } def build_delay_pattern_mask( self, input_ids: torch.LongTensor, pad_token_id: int, max_length: Optional[int] = None ): """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks, seq_len)`: - [P, -1, -1, -1, -1, P, P, P] - [P, P, -1, -1, -1, -1, P, P] - [P, P, P, -1, -1, -1, -1, P] - [P, P, P, P, -1, -1, -1, -1] where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [P, a, b, -1, -1, P, P, P] - [P, P, c, d, -1, -1, P, P] - [P, P, P, e, f, -1, -1, P] - [P, P, P, P, g, h, -1, -1] where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1 tokens in our prediction. """ # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len) input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) bsz, num_codebooks, seq_len = input_ids.shape max_length = max_length if max_length is not None else self.generation_config.max_length input_ids_shifted = ( torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 ) channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks # we only apply the mask if we have a large enough seq len - otherwise we return as is if max_length < 2 * channel_codebooks - 1: return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1) # fill the shifted ids with the prompt entries, offset by the codebook idx for codebook in range(channel_codebooks): if self.config.audio_channels == 1: # mono channel - loop over the codebooks one-by-one input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook] else: # left/right channels are interleaved in the generated codebooks, so handle one then the other input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook] input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1] # construct a pattern mask that indicates the positions of padding tokens for each codebook # first fill the upper triangular part (the EOS padding) delay_pattern = torch.triu( torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1 ) # then fill the lower triangular part (the BOS padding) delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool)) if self.config.audio_channels == 2: # for left/right channel we need to duplicate every row of the pattern mask in an interleaved fashion delay_pattern = delay_pattern.repeat_interleave(2, dim=0) mask = ~delay_pattern.to(input_ids.device) input_ids = mask * input_ids_shifted + ~mask * pad_token_id # find the first position to 
start generating - this is the first place we have the -1 token # and will always be in the first codebook (since it has no codebook offset) first_codebook_ids = input_ids[:, 0, :] start_ids = (first_codebook_ids == -1).nonzero()[:, 1] if len(start_ids) > 0: first_start_id = min(start_ids) else: # we have no tokens that need to be filled - return entire matrix of input ids first_start_id = seq_len # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len) pattern_mask = input_ids.reshape(bsz * num_codebooks, -1) input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1) return input_ids, pattern_mask @staticmethod def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): """Apply a delay pattern mask to the decoder input ids, only preserving predictions where the mask is set to -1, and otherwise setting to the value detailed in the mask.""" seq_len = input_ids.shape[-1] decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len] input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask) return input_ids @torch.no_grad() # Ignore copy def generate( self, inputs: Optional[torch.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, synced_gpus: Optional[bool] = None, streamer: Optional["BaseStreamer"] = None, **kwargs, ): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. 
                If a stopping criterion is passed that is already created with the arguments or a generation config,
                an error is thrown. This feature is intended for advanced users.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed to avoid deadlocking with
                `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            kwargs (`dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

        Return:
            [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.

                If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateDecoderOnlyOutput`],
                    - [`~generation.GenerateBeamDecoderOnlyOutput`]

                If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateEncoderDecoderOutput`],
                    - [`~generation.GenerateBeamEncoderDecoderOutput`]
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        # 2. Set generation parameters if not already defined
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        requires_attention_mask = "encoder_outputs" not in model_kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None

        # 3. Define model inputs
        input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = input_ids.shape[0] // self.num_codebooks
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device)

        # 4. Define other model kwargs
        model_kwargs["use_cache"] = generation_config.use_cache
        model_kwargs["guidance_scale"] = generation_config.guidance_scale

        if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                input_ids, generation_config, model_kwargs
            )

        # 5. Prepare `max_length` depending on other stopping criteria.
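        # NOTE: an illustrative layout reminder (the values below are hypothetical, not computed here):
        # `input_ids` is flattened across codebooks with shape (batch_size * num_codebooks, seq_len),
        # e.g. batch_size=2 and num_codebooks=4 give a leading dimension of 8. `max_length` is resolved
        # against this flattened layout before the delay pattern mask is built in step 6.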
input_ids_length = input_ids.shape[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None generation_config = self._prepare_generated_length( generation_config=generation_config, has_default_max_length=has_default_max_length, has_default_min_length=has_default_min_length, model_input_name=model_input_name, inputs_tensor=input_ids, input_ids_length=input_ids_length, ) # 6. Prepare `input_ids` which will be used for auto-regressive generation # Build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Musicgen) input_ids, delay_pattern_mask = self.build_delay_pattern_mask( input_ids, pad_token_id=generation_config._decoder_start_token_tensor, max_length=generation_config.max_length, ) if streamer is not None: streamer.put(input_ids.cpu()) # stash the delay mask so that we don't have to recompute it in each forward pass model_kwargs["delay_pattern_mask"] = delay_pattern_mask # 7. determine generation mode generation_mode = generation_config.get_generation_mode() # 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG) if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None # 9. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_length, encoder_input_ids=input_ids, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=input_ids.device, ) # 10. prepare stopping criteria stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): # expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_return_sequences, **model_kwargs, ) # 11. run sample outputs = self._sample( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria, generation_config=generation_config, synced_gpus=synced_gpus, streamer=streamer, **model_kwargs, ) else: raise ValueError( "Got incompatible mode for generation, should be one of greedy or sampling. " "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`." 
            )

        if generation_config.return_dict_in_generate:
            output_ids = outputs.sequences
        else:
            output_ids = outputs

        # apply the pattern mask to the final ids
        output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"])

        # revert the pattern delay mask by filtering the pad token id
        output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
            batch_size, self.num_codebooks, -1
        )

        if generation_config.return_dict_in_generate:
            outputs.sequences = output_ids
            return outputs
        else:
            return output_ids


@auto_docstring
class MusicgenMelodyForConditionalGeneration(PreTrainedModel, GenerationMixin):
    config: MusicgenMelodyConfig
    main_input_name = "input_ids"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    def __init__(
        self,
        config: Optional[MusicgenMelodyConfig] = None,
        text_encoder: Optional[PreTrainedModel] = None,
        audio_encoder: Optional[PreTrainedModel] = None,
        decoder: Optional[MusicgenMelodyForCausalLM] = None,
    ):
        r"""
        text_encoder (`PreTrainedModel`, *optional*):
            The text encoder model that encodes text into hidden states for conditioning.
        audio_encoder (`PreTrainedModel`, *optional*):
            The audio encoder model that encodes audio into hidden states for conditioning.
        decoder (`MusicgenMelodyForCausalLM`, *optional*):
            The decoder model that generates audio tokens based on conditioning signals.
        """
        if config is None and None in (text_encoder, audio_encoder, decoder):
            raise ValueError(
                "Either a configuration has to be provided, or all three of text encoder, audio encoder and "
                "Musicgen Melody decoder have to be provided."
            )
        if config is None:
            config = MusicgenMelodyConfig.from_sub_models_config(
                text_encoder.config, audio_encoder.config, decoder.config
            )
        else:
            if not isinstance(config, self.config_class):
                raise ValueError(f"Config: {config} has to be of type {self.config_class}")

        # initialize with config
        super().__init__(config)

        if text_encoder is None:
            text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder)

        if audio_encoder is None:
            audio_encoder = AutoModel.from_config(config.audio_encoder)

        if decoder is None:
            decoder = MusicgenMelodyForCausalLM._from_config(config.decoder)

        self.text_encoder = text_encoder
        self.audio_encoder = audio_encoder
        self.decoder = decoder

        # make sure that the individual model's config refers to the shared config
        # so that the updates to the config will be synced
        self.config.text_encoder._attn_implementation = self.text_encoder.config._attn_implementation
        self.config.audio_encoder._attn_implementation = self.audio_encoder.config._attn_implementation
        self.config.decoder._attn_implementation = self.decoder.config._attn_implementation
        self.text_encoder.config = self.config.text_encoder
        self.audio_encoder.config = self.config.audio_encoder
        self.decoder.config = self.config.decoder

        # text encoder outputs might need to be projected to different dimension for decoder
        if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size:
            self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size)

        # audio encoder outputs after chroma extraction might need to be projected to different dimension for decoder
        if self.config.num_chroma != self.decoder.config.hidden_size:
            self.audio_enc_to_dec_proj = nn.Linear(self.config.num_chroma, self.decoder.config.hidden_size)

        if self.text_encoder.get_output_embeddings() is not None:
            raise ValueError(
                f"The encoder {self.text_encoder} should not have an LM head. Please use a model without an LM head"
            )

        # Initialize projection layers weights and tie text encoder and decoder weights if set accordingly
        self.post_init()

    def _init_weights(self, module):
        # MusicgenMelodyForConditionalGeneration is made of PreTrainedModels that have already been initialized
        # Projection layers still need to be initialized.
        std = self.decoder.config.initializer_factor
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()

    def tie_weights(self):
        # tie text encoder & decoder if needed
        if self.config.tie_encoder_decoder:
            # tie text encoder and decoder base model
            decoder_base_model_prefix = self.decoder.base_model_prefix
            tied_weights = self._tie_encoder_decoder_weights(
                self.text_encoder,
                self.decoder._modules[decoder_base_model_prefix],
                self.decoder.base_model_prefix,
                "text_encoder",
            )
            # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class
            # attribute, not an instance member; modifying it would modify the entire class,
            # leading to issues on subsequent calls by different tests.
            self._dynamic_tied_weights_keys = tied_weights

    def get_text_encoder(self):
        return self.text_encoder

    def get_encoder(self):
        # get the text encoder to compute the conditioning hidden-states for generation
        return self.get_text_encoder()

    def get_decoder(self):
        return self.decoder

    def get_input_embeddings(self):
        return self.text_encoder.get_input_embeddings()

    def get_output_embeddings(self):
        return self.decoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        return self.decoder.set_output_embeddings(new_embeddings)

    @classmethod
    # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForConditionalGeneration.from_sub_models_pretrained with Musicgen->MusicgenMelody, musicgen-small->musicgen-melody
    def from_sub_models_pretrained(
        cls,
        text_encoder_pretrained_model_name_or_path: Optional[str] = None,
        audio_encoder_pretrained_model_name_or_path: Optional[str] = None,
        decoder_pretrained_model_name_or_path: Optional[str] = None,
        *model_args,
        **kwargs,
    ) -> PreTrainedModel:
        r"""
        Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of
        the library from pretrained model checkpoints.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To
        train the model, you need to first set it back in training mode with `model.train()`.

        Params:
            text_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the text encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            audio_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the audio encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the decoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.

            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initiate the model
                (e.g., `output_attentions=True`).

                - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration
                  parameter.
                - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration
                  parameter.
                - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from transformers import MusicgenMelodyForConditionalGeneration

        >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder
        >>> model = MusicgenMelodyForConditionalGeneration.from_sub_models_pretrained(
        ...     text_encoder_pretrained_model_name_or_path="google-t5/t5-base",
        ...     audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz",
        ...     decoder_pretrained_model_name_or_path="facebook/musicgen-melody",
        ... )
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./musicgen-ft")
        >>> # load fine-tuned model
        >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("./musicgen-ft")
        ```"""

        kwargs_text_encoder = {
            argument[len("text_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("text_encoder_")
        }

        kwargs_audio_encoder = {
            argument[len("audio_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("audio_encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        # remove text encoder, audio encoder and decoder kwargs from kwargs
        for key in kwargs_text_encoder:
            del kwargs["text_encoder_" + key]
        for key in kwargs_audio_encoder:
            del kwargs["audio_encoder_" + key]
        for key in kwargs_decoder:
            del kwargs["decoder_" + key]

        # Load and initialize the encoder and decoder
        # The distinction between encoder and decoder at the model level is made
        # by the value of the flag `is_decoder` that we need to set correctly.
        text_encoder = kwargs_text_encoder.pop("model", None)
        if text_encoder is None:
            if text_encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_text_encoder:
                encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(
                    text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True
                )

                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model "
                        "from a decoder model. Cross-attention and causal mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False

                kwargs_text_encoder["config"] = encoder_config

            text_encoder = AutoModel.from_pretrained(
                text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder
            )

        audio_encoder = kwargs_audio_encoder.pop("model", None)
        if audio_encoder is None:
            if audio_encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_audio_encoder:
                encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(
                    audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True
                )

                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model "
                        "from a decoder model. Cross-attention and causal mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False

                kwargs_audio_encoder["config"] = encoder_config

            audio_encoder = AutoModel.from_pretrained(
                audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder
            )

        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            if decoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_decoder:
                decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
                    decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
                )

                if isinstance(decoder_config, MusicgenMelodyConfig):
                    decoder_config = decoder_config.decoder

                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True

                kwargs_decoder["config"] = decoder_config

            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
" f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_sub_models_pretrained(...)`" ) decoder = MusicgenMelodyForCausalLM.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder ) # instantiate config with corresponding kwargs config = MusicgenMelodyConfig.from_sub_models_config( text_encoder.config, audio_encoder.config, decoder.config, **kwargs ) return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, input_features: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, MusicgenMelodyOutputWithPast]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) <Tip warning={true}> The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks, target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as `decoder_input_ids`. </Tip> decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of conditional hidden-states representing the concatenation of the projected text encoder output and the projected audio encoder output. Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
            you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels
            set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.

        Examples:

        ```python
        >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration
        >>> import torch

        >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody")
        >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")

        >>> inputs = processor(
        ...     text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
        ...     padding=True,
        ...     return_tensors="pt",
        ... )

        >>> pad_token_id = model.generation_config.pad_token_id
        >>> decoder_input_ids = (
        ...     torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long)
        ...     * pad_token_id
        ... )

        >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits
        >>> logits.shape  # (bsz * num_codebooks, encoder_len + tgt_len, vocab_size)
        torch.Size([8, 249, 2048])
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        kwargs_text_encoder = {
            argument[len("text_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("text_encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        if encoder_hidden_states is None:
            if inputs_embeds is not None or input_ids is not None:
                encoder_outputs = self.text_encoder(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    inputs_embeds=inputs_embeds,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    **kwargs_text_encoder,
                )

                encoder_hidden_states = encoder_outputs[0]

                # optionally project encoder_hidden_states
                if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size:
                    encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

            if attention_mask is not None and encoder_hidden_states is not None:
                encoder_hidden_states = encoder_hidden_states * attention_mask[..., None]

            # set a default audio conditional hidden states if text is not None
            if encoder_hidden_states is not None and input_features is None:
                input_features = torch.zeros(
                    (encoder_hidden_states.shape[0], 1, self.config.num_chroma),
                    device=self.device,
                    dtype=self.dtype,
                )
                input_features[:, :, 0] = 1

            if input_features is not None:
                audio_hidden_states = input_features

                # optionally project audio_hidden_states ->
                # (batch_size, seq_len, num_chroma) -> (batch_size, seq_len, hidden_size)
                if self.config.num_chroma != self.decoder.config.hidden_size:
                    audio_hidden_states = self.audio_enc_to_dec_proj(audio_hidden_states)

                # pad or truncate to config.chroma_length
                if audio_hidden_states.shape[1] < self.config.chroma_length:
                    n_repeat = int(math.ceil(self.config.chroma_length / audio_hidden_states.shape[1]))
                    audio_hidden_states = audio_hidden_states.repeat(1, n_repeat, 1)
                else:
                    logger.warning(
                        f"The conditional audio signal is of length {audio_hidden_states.shape[1]}, which exceeds "
                        f"the maximum chroma duration of {self.config.chroma_length}. "
                        f"The audio will be truncated to {self.config.chroma_length} frames."
) audio_hidden_states = audio_hidden_states[:, : self.config.chroma_length] if encoder_hidden_states is not None: encoder_hidden_states = torch.cat([audio_hidden_states, encoder_hidden_states], dim=1) else: encoder_hidden_states = audio_hidden_states if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right( labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id ) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, labels=labels, **kwargs_decoder, ) if not return_dict: return decoder_outputs + (encoder_hidden_states,) return MusicgenMelodyOutputWithPast( loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, encoder_hidden_states=encoder_hidden_states, ) def prepare_inputs_for_generation( self, decoder_input_ids, encoder_hidden_states=None, past_key_values=None, attention_mask=None, decoder_attention_mask=None, decoder_head_mask=None, use_cache=None, decoder_delay_pattern_mask=None, guidance_scale=None, **kwargs, ): # Overwritten -- MusicGen has custom processing if decoder_delay_pattern_mask is None: decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask( decoder_input_ids, self.generation_config.pad_token_id, max_length=self.generation_config.max_length, ) # apply the delay pattern mask decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask) if guidance_scale is not None and guidance_scale > 1: # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these # before sampling) decoder_input_ids = decoder_input_ids.repeat((2, 1)) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat((2, 1)) if past_key_values is not None: past_length = past_key_values.get_seq_length() # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] # we only want to use conditional signal in the 1st generation step but keeping the attention mask encoder_hidden_states = None # we also have to update the attention mask return { "input_ids": None, # encoder_hidden_states is defined. 
    # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForConditionalGeneration._prepare_decoder_input_ids_for_generation
    def _prepare_decoder_input_ids_for_generation(
        self,
        batch_size: int,
        model_input_name: str,
        model_kwargs: dict[str, torch.Tensor],
        decoder_start_token_id: Optional[int] = None,
        bos_token_id: Optional[int] = None,
        device: torch.device = None,
    ) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]:
        """Prepares `decoder_input_ids` for generation with encoder-decoder models"""

        # 1. Check whether the user has defined `decoder_input_ids` manually. To simplify input naming, we also
        # allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
        if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
            decoder_input_ids = model_kwargs.pop("decoder_input_ids")
        elif "input_ids" in model_kwargs and model_input_name != "input_ids":
            decoder_input_ids = model_kwargs.pop("input_ids")
        else:
            decoder_input_ids = None

        # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
        decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
        if device is None:
            device = self.device
        decoder_input_ids_start = (
            torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device)
            * decoder_start_token_id
        )

        # no user input -> use decoder_start_token_id as decoder_input_ids
        if decoder_input_ids is None:
            decoder_input_ids = decoder_input_ids_start

        # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
        # decoder_attention_mask if provided)
        elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item():
            decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1)
            if "decoder_attention_mask" in model_kwargs:
                decoder_attention_mask = model_kwargs["decoder_attention_mask"]
                decoder_attention_mask = torch.cat(
                    (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
                    dim=-1,
                )
                model_kwargs["decoder_attention_mask"] = decoder_attention_mask

        return decoder_input_ids, model_kwargs

    def _prepare_encoder_hidden_states_kwargs_for_generation(
        self,
        inputs_tensor: torch.Tensor,
        model_kwargs,
        model_input_name: Optional[str],
        generation_config: GenerationConfig,
    ) -> dict[str, Any]:
        encoder_hidden_states = None
        # attention mask is consumed once to produce text conditional hidden states through the text encoder
        encoder_attention_mask = model_kwargs.pop("attention_mask")

        guidance_scale = generation_config.guidance_scale

        # 1. condition on text
        if inputs_tensor is not None:
            encoder = self.get_text_encoder()
            # Compatibility with Accelerate big model inference: we need the encoder to output stuff on the same
            # device as the inputs.
            if hasattr(encoder, "_hf_hook"):
                encoder._hf_hook.io_same_device = True

            # Prepare args and kwargs from model kwargs.
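            # Explanatory note (not in the original source): kwargs prefixed with `decoder_` belong to the audio
            # decoder and `use_cache` only controls decoding, so both are stripped before calling the text encoder
            # below; the remaining kwargs are then filtered against the encoder's actual signature unless the
            # encoder accepts a **kwargs wildcard.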
irrelevant_prefix = ["decoder_", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } encoder_signature = set(inspect.signature(encoder.forward).parameters) encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = { argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature } encoder_kwargs["output_attentions"] = generation_config.output_attentions encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states # make sure that encoder returns `ModelOutput` model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor if encoder_attention_mask is not None: encoder_kwargs["attention_mask"] = encoder_attention_mask encoder_hidden_states = encoder(**encoder_kwargs).last_hidden_state # optionally project encoder_hidden_states if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) # for classifier free guidance we need to add a 'null' input to our encoder hidden states if guidance_scale is not None and guidance_scale > 1: encoder_hidden_states = torch.concatenate( [encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0 ) if encoder_attention_mask is not None: encoder_attention_mask = torch.concatenate( [encoder_attention_mask, torch.zeros_like(encoder_attention_mask)], dim=0 ) if encoder_attention_mask is not None: encoder_hidden_states = encoder_hidden_states * encoder_attention_mask[..., None] # 2. 
condition on audio audio_hidden_states = model_kwargs.get("input_features", None) if inputs_tensor is not None: if audio_hidden_states is not None: null_audio_hidden_states = torch.zeros_like(audio_hidden_states) else: null_audio_hidden_states = torch.zeros( (inputs_tensor.shape[0], 1, self.config.num_chroma), device=self.device, dtype=self.dtype ) null_audio_hidden_states[:, :, 0] = 1 if audio_hidden_states is None: audio_hidden_states = null_audio_hidden_states if audio_hidden_states is not None: # for classifier free guidance we need to add a 'null' input to our audio hidden states if guidance_scale is not None and guidance_scale > 1: audio_hidden_states = torch.concatenate([audio_hidden_states, null_audio_hidden_states], dim=0) # optionally project audio_hidden_states -> # (batch_size, seq_len, num_chroma) -> (batch_size, seq_len, hidden_size) if self.config.num_chroma != self.decoder.config.hidden_size: audio_hidden_states = self.audio_enc_to_dec_proj(audio_hidden_states) # pad or truncate to config.chroma_length if audio_hidden_states.shape[1] < self.config.chroma_length: n_repeat = int(math.ceil(self.config.chroma_length / audio_hidden_states.shape[1])) audio_hidden_states = audio_hidden_states.repeat(1, n_repeat, 1) audio_hidden_states = audio_hidden_states[:, : self.config.chroma_length] if encoder_hidden_states is not None: encoder_hidden_states = torch.cat([audio_hidden_states, encoder_hidden_states], dim=1) else: encoder_hidden_states = audio_hidden_states model_kwargs["encoder_hidden_states"] = encoder_hidden_states return model_kwargs def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id) def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError( "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the" " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or" " model.decoder.resize_token_embeddings(...))" ) def _maybe_initialize_input_ids_for_generation( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[dict[str, torch.Tensor]] = None, ) -> torch.LongTensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with # soft-prompting or in multimodal implementations built on top of decoder-only language models. batch_size = 1 for value in model_kwargs.values(): if isinstance(value, torch.Tensor): batch_size = value.shape[0] break return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id def freeze_audio_encoder(self): """ Freeze the audio encoder weights. """ for param in self.audio_encoder.parameters(): param.requires_grad = False self.audio_encoder._requires_grad = False def freeze_text_encoder(self): """ Freeze the text encoder weights. 
""" for param in self.text_encoder.parameters(): param.requires_grad = False self.text_encoder._requires_grad = False # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForConditionalGeneration._get_decoder_start_token_id def _get_decoder_start_token_id( self, decoder_start_token_id: Optional[Union[int, list[int]]] = None, bos_token_id: Optional[int] = None ) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @torch.no_grad() def generate( self, inputs: Optional[torch.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[LogitsProcessorList] = None, stopping_criteria: Optional[StoppingCriteriaList] = None, synced_gpus: Optional[bool] = None, streamer: Optional["BaseStreamer"] = None, **kwargs, ): """ Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. 
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed to avoid deadlocking with
                `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            kwargs (`dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with
                *decoder_*.

        Return:
            [`~utils.ModelOutput`] or `torch.FloatTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.

                If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the
                possible [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateDecoderOnlyOutput`],
                    - [`~generation.GenerateBeamDecoderOnlyOutput`]

                If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateEncoderDecoderOutput`],
                    - [`~generation.GenerateBeamEncoderDecoderOutput`]
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config
        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        # 2. Set generation parameters if not already defined
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        requires_attention_mask = "encoder_outputs" not in model_kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None

        # 3. Define model inputs
        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = inputs_tensor.shape[0]
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device)

        # 4. Define other model kwargs
        model_kwargs["use_cache"] = generation_config.use_cache
        model_kwargs["guidance_scale"] = generation_config.guidance_scale

        if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                inputs_tensor, generation_config, model_kwargs
            )

        if "encoder_hidden_states" not in model_kwargs:
            # encoder_hidden_states are created and added to `model_kwargs`
            model_kwargs = self._prepare_encoder_hidden_states_kwargs_for_generation(
                inputs_tensor, model_kwargs, model_input_name, generation_config
            )

        # 5. Prepare `input_ids` which will be used for auto-regressive generation
        input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
            batch_size=batch_size,
            model_input_name=model_input_name,
            model_kwargs=model_kwargs,
            decoder_start_token_id=generation_config._decoder_start_token_tensor,
            bos_token_id=generation_config._bos_token_tensor,
            device=inputs_tensor.device,
        )

        # 6. Prepare `max_length` depending on other stopping criteria.
        input_ids_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )

        self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)

        # 7. Prepare the cache.
        # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
        # - different models have a different cache name expected by the model (default = "past_key_values")
        # - `max_length`, prepared above, is used to determine the maximum cache length
        max_cache_length = generation_config.max_length - 1
        if (
            inputs_tensor.shape[1] != input_ids_length
            and model_input_name == "inputs_embeds"
            and not self.config.is_encoder_decoder
        ):
            max_cache_length += inputs_tensor.shape[1]
        self._prepare_cache_for_generation(
            generation_config,
            model_kwargs,
            assistant_model=None,
            batch_size=batch_size,
            max_cache_length=max_cache_length,
        )

        # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to
        # MusicGen)
        input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids,
            pad_token_id=generation_config._decoder_start_token_tensor,
            max_length=generation_config.max_length,
        )
        # stash the delay mask so that we don't have to recompute it in each forward pass
        model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask

        # input_ids are ready to be placed on the streamer (if used)
        if streamer is not None:
            streamer.put(input_ids.cpu())

        # 8. determine generation mode
        generation_mode = generation_config.get_generation_mode()

        # 9. prepare batched CFG externally (to enable coexistence with the unbatched CFG)
        if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
            logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
            generation_config.guidance_scale = None

        # 10. prepare distribution pre-processing samplers
        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_length,
            encoder_input_ids=inputs_tensor,
            prefix_allowed_tokens_fn=None,
            logits_processor=logits_processor,
            device=input_ids.device,
        )

        # 11. prepare stopping criteria
        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
            # expand input_ids with `num_return_sequences` additional sequences per batch
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_return_sequences,
                is_encoder_decoder=self.config.is_encoder_decoder,
                **model_kwargs,
            )

            # 12. run sample
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )

        else:
            raise ValueError(
                "Got incompatible mode for generation, should be one of greedy or sampling. 
" "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`." ) if generation_config.return_dict_in_generate: output_ids = outputs.sequences else: output_ids = outputs # apply the pattern mask to the final ids output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"]) # revert the pattern delay mask by filtering the pad token id output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape( batch_size, self.decoder.num_codebooks, -1 ) # append the frame dimension back to the audio codes output_ids = output_ids[None, ...] audio_scales = model_kwargs.get("audio_scales") if audio_scales is None: audio_scales = [None] * batch_size if self.decoder.config.audio_channels == 1: output_values = self.audio_encoder.decode( output_ids, audio_scales=audio_scales, ).audio_values else: codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales) output_values_left = codec_outputs_left.audio_values codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales) output_values_right = codec_outputs_right.audio_values output_values = torch.cat([output_values_left, output_values_right], dim=1) if generation_config.return_dict_in_generate: outputs.sequences = output_values return outputs else: return output_values __all__ = [ "MusicgenMelodyForConditionalGeneration", "MusicgenMelodyForCausalLM", "MusicgenMelodyModel", "MusicgenMelodyPreTrainedModel", ]
transformers/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py/0
{ "file_path": "transformers/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py", "repo_id": "transformers", "token_count": 47241 }
507
# coding=utf-8 # Copyright 2024 Om Research Lab and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OmDet-Turbo model.""" import math import warnings from collections import OrderedDict from dataclasses import dataclass from functools import lru_cache from typing import Optional, Union import torch import torch.nn.functional as F from torch import Tensor, nn from ...activations import ACT2CLS, ACT2FN from ...file_utils import ( ModelOutput, ) from ...integrations import use_kernel_forward_from_hub from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.backbone_utils import load_backbone from ..auto import AutoModel from .configuration_omdet_turbo import OmDetTurboConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for outputs of the OmDetTurboHybridEncoder. """ ) class OmDetTurboEncoderOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor`): Last hidden states of the encoder. extracted_states (`tuple[torch.FloatTensor]`): The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None extracted_states: tuple[torch.FloatTensor] = None @dataclass @auto_docstring( custom_intro=""" Base class for outputs of the OmDetTurboDecoder. """ ) class OmDetTurboDecoderOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder. decoder_coords (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The predicted coordinates of the objects. decoder_classes (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`): The predicted classes of the objects. encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The predicted coordinates of the objects from the encoder. encoder_class_logits (`tuple[torch.FloatTensor]` of shape `(batch_size, num_queries, num_classes)`): The predicted class of the objects from the encoder. init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The initial reference points. intermediate_reference_points (`tuple[tuple[torch.FloatTensor]]`): The intermediate reference points. 
""" last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None decoder_coords: Optional[torch.FloatTensor] = None decoder_classes: Optional[torch.FloatTensor] = None encoder_coord_logits: Optional[torch.FloatTensor] = None encoder_class_logits: tuple[torch.FloatTensor] = None init_reference_points: Optional[torch.FloatTensor] = None intermediate_reference_points: tuple[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Output type of [`OmDetTurboObjectDetectionOutput`]. """ ) class OmDetTurboObjectDetectionOutput(ModelOutput): r""" loss (`torch.FloatTensor`): The loss value. decoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The predicted coordinates logits of the objects. decoder_class_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`): The predicted class of the objects. init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The initial reference points. intermediate_reference_points (`tuple[tuple[torch.FloatTensor]]`): The intermediate reference points. encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The predicted coordinates of the objects from the encoder. encoder_class_logits (`tuple[torch.FloatTensor]`): The predicted class of the objects from the encoder. encoder_extracted_states (`torch.FloatTensor`): The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder. decoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*): Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention, cross-attention and multi-scale deformable attention heads. encoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*): Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention, cross-attention and multi-scale deformable attention heads. classes_structure (`torch.LongTensor`, *optional*): The number of queried classes for each image. 
""" loss: Optional[torch.FloatTensor] = None decoder_coord_logits: Optional[torch.FloatTensor] = None decoder_class_logits: Optional[torch.FloatTensor] = None init_reference_points: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[tuple[tuple[torch.FloatTensor]]] = None encoder_coord_logits: Optional[torch.FloatTensor] = None encoder_class_logits: tuple[torch.FloatTensor] = None encoder_extracted_states: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None classes_structure: Optional[torch.LongTensor] = None @use_kernel_forward_from_hub("MultiScaleDeformableAttention") # Copied from transformers.models.deformable_detr.modeling_deformable_detr.MultiScaleDeformableAttention class MultiScaleDeformableAttention(nn.Module): def forward( self, value: Tensor, value_spatial_shapes: Tensor, value_spatial_shapes_list: list[tuple], level_start_index: Tensor, sampling_locations: Tensor, attention_weights: Tensor, im2col_step: int, ): batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes_list], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes_list): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id] .flatten(2) .transpose(1, 2) .reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False, ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class OmDetTurboLRUCache: def __init__(self, capacity: int): self.cache = OrderedDict() self.capacity = capacity self.current_load = 0 def has(self, key) -> bool: return key in self.cache def get(self, key): """ Get the value of the key if the key exists in the cache, otherwise return None. Move the key to the end of the cache to show that it was recently used. """ if key not in self.cache: return None self.cache.move_to_end(key) return self.cache[key] def put(self, key, value) -> None: """ Add the key-value pair to the cache. Move the key to the end of the cache to show that it was recently used. 
        If the cache is full, remove the first key (least recently used).
        """
        if key not in self.cache:
            self.current_load += 1
            if self.current_load > self.capacity:
                self.cache.popitem(last=False)
                self.current_load -= 1

        self.cache[key] = value
        self.cache.move_to_end(key)


class OmDetTurboLanguageBackbone(nn.Module):
    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        self.model = AutoModel.from_config(config.text_config)
        self.text_projection = nn.Parameter(torch.zeros(config.text_projection_in_dim, config.text_projection_out_dim))

    def forward(self, hidden_states, mask=None, encode_type="task"):
        text_outputs = self.model(hidden_states)
        pooled_output = text_outputs[0]
        if encode_type == "task":
            if mask is None:
                raise ValueError("mask is required for task encoding")
            max_len = (mask != 0).sum(1).max().item()
            truncated_mask = mask[:, :max_len]
            truncated_output = pooled_output[:, :max_len, :]
            return truncated_output.transpose(0, 1), truncated_mask
        elif encode_type == "class":
            max_pooled_output = pooled_output[torch.arange(pooled_output.shape[0]), hidden_states.argmax(dim=-1)]
            projected_output = max_pooled_output @ self.text_projection
            return projected_output
        else:
            raise ValueError(f"encode_type {encode_type} is not supported")


class OmDetTurboVisionBackbone(nn.Module):
    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        self.apply_layernorm_after_vision_backbone = config.apply_layernorm_after_vision_backbone
        self.vision_backbone = load_backbone(config)
        self.layer_norms = nn.ModuleList(
            [nn.LayerNorm(in_channel_dim, eps=config.layer_norm_eps) for in_channel_dim in config.encoder_in_channels]
        )

    def forward(self, pixel_values):
        outputs = self.vision_backbone(pixel_values).feature_maps
        if self.apply_layernorm_after_vision_backbone:
            outputs = [
                layer_norm(output).permute(0, 3, 1, 2).contiguous()
                for layer_norm, output in zip(self.layer_norms, outputs)
            ]
        return outputs


# Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->OmDetTurbo, Deformable DETR->OmDet-Turbo
class OmDetTurboMultiscaleDeformableAttention(nn.Module):
    """
    Multiscale deformable attention as proposed in Deformable DETR.
    """

    def __init__(self, config: OmDetTurboConfig, num_heads: int, n_points: int):
        super().__init__()

        self.attn = MultiScaleDeformableAttention()

        if config.d_model % num_heads != 0:
            raise ValueError(
                f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
            )
        dim_per_head = config.d_model // num_heads
        # check if dim_per_head is a power of 2
        if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
            warnings.warn(
                "It is recommended to set embed_dim (d_model) in OmDetTurboMultiscaleDeformableAttention so that the"
                " dimension of each attention head is a power of 2, which is more efficient in the authors' CUDA"
                " implementation."
) self.im2col_step = 64 self.d_model = config.d_model self.n_levels = config.num_feature_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2) self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points) self.value_proj = nn.Linear(config.d_model, config.d_model) self.output_proj = nn.Linear(config.d_model, config.d_model) self.disable_custom_kernels = config.disable_custom_kernels def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape # Ignore copy total_elements = sum([shape[0] * shape[1] for shape in spatial_shapes_list]) if total_elements != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 num_coordinates = reference_points.shape[-1] if num_coordinates == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif num_coordinates == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") output = self.attn( value, spatial_shapes, spatial_shapes_list, level_start_index, sampling_locations, attention_weights, self.im2col_step, ) output = self.output_proj(output) return output, attention_weights # Copied from transformers.models.rt_detr.modeling_rt_detr.RTDetrConvNormLayer with RTDetr->OmDetTurbo class OmDetTurboConvNormLayer(nn.Module): def __init__(self, config, in_channels, out_channels, kernel_size, stride, padding=None, activation=None): super().__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, stride, padding=(kernel_size - 1) // 2 if padding is None else padding, bias=False, ) self.norm = nn.BatchNorm2d(out_channels, config.batch_norm_eps) 
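        # Explanatory note (not in the original source): the default padding of (kernel_size - 1) // 2 gives
        # "same"-style padding for odd kernel sizes, so stride-1 convolutions preserve the spatial resolution
        # of the feature map.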
self.activation = nn.Identity() if activation is None else ACT2CLS[activation]() def forward(self, hidden_state): hidden_state = self.conv(hidden_state) hidden_state = self.norm(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state # Copied from transformers.models.rt_detr.modeling_rt_detr.RTDetrRepVggBlock with RTDetr->OmDetTurbo, activation_function->csp_activation class OmDetTurboRepVggBlock(nn.Module): """ RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again". """ def __init__(self, config: OmDetTurboConfig): super().__init__() activation = config.csp_activation hidden_channels = int(config.encoder_hidden_dim * config.hidden_expansion) self.conv1 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 3, 1, padding=1) self.conv2 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 1, 1, padding=0) self.activation = nn.Identity() if activation is None else ACT2CLS[activation]() def forward(self, x): y = self.conv1(x) + self.conv2(x) return self.activation(y) # Copied from transformers.models.rt_detr.modeling_rt_detr.RTDetrCSPRepLayer with RTDetr->OmDetTurbo, activation_function->csp_activation class OmDetTurboCSPRepLayer(nn.Module): """ Cross Stage Partial (CSP) network layer with RepVGG blocks. """ def __init__(self, config: OmDetTurboConfig): super().__init__() in_channels = config.encoder_hidden_dim * 2 out_channels = config.encoder_hidden_dim num_blocks = 3 activation = config.csp_activation hidden_channels = int(out_channels * config.hidden_expansion) self.conv1 = OmDetTurboConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation) self.conv2 = OmDetTurboConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation) self.bottlenecks = nn.Sequential(*[OmDetTurboRepVggBlock(config) for _ in range(num_blocks)]) if hidden_channels != out_channels: self.conv3 = OmDetTurboConvNormLayer(config, hidden_channels, out_channels, 1, 1, activation=activation) else: self.conv3 = nn.Identity() def forward(self, hidden_state): hidden_state_1 = self.conv1(hidden_state) hidden_state_1 = self.bottlenecks(hidden_state_1) hidden_state_2 = self.conv2(hidden_state) return self.conv3(hidden_state_1 + hidden_state_2) class OmDetTurboMultiheadAttention(nn.Module): """Equivalent implementation of nn.MultiheadAttention with `batch_first=True`.""" def __init__(self, config, hidden_size, num_attention_heads, dropout): super().__init__() if hidden_size % num_attention_heads != 0: raise ValueError( f"The hidden size ({hidden_size}) is not a multiple of the number of attention " f"heads ({num_attention_heads})" ) self.num_attention_heads = num_attention_heads self.attention_head_size = int(hidden_size / num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(hidden_size, self.all_head_size) self.key = nn.Linear(hidden_size, self.all_head_size) self.value = nn.Linear(hidden_size, self.all_head_size) self.out_proj = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(dropout) def forward( self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: batch_size, seq_length, _ = queries.shape query_layer = ( self.query(queries) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(keys).view(batch_size, -1, 
self.num_attention_heads, self.attention_head_size).transpose(1, 2) ) value_layer = ( self.value(values).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) context_layer = self.out_proj(context_layer) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class OmDetTurboEncoderLayer(nn.Module): def __init__(self, config: OmDetTurboConfig): super().__init__() self.self_attn = OmDetTurboMultiheadAttention( config, hidden_size=config.encoder_hidden_dim, num_attention_heads=config.num_attention_heads, dropout=config.encoder_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.encoder_dropout) self.activation_fn = ACT2FN[config.encoder_feedforward_activation] self.encoder_feedforward_dropout = nn.Dropout(config.encoder_feedforward_dropout) self.fc1 = nn.Linear(config.encoder_hidden_dim, config.encoder_dim_feedforward) self.fc2 = nn.Linear(config.encoder_dim_feedforward, config.encoder_hidden_dim) self.final_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps) @staticmethod def with_pos_embed(tensor, pos_embed): return tensor if pos_embed is None else tensor + pos_embed def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. position_embeddings (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states query = key = self.with_pos_embed(hidden_states, position_embeddings) hidden_states = self.self_attn( queries=query, keys=key, values=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states, attentions = hidden_states if output_attentions else (hidden_states[0], None) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.encoder_feedforward_dropout(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) if output_attentions: return hidden_states, attentions return (hidden_states,) class OmDetTurboEncoder(nn.Module): def __init__(self, config: OmDetTurboConfig): super().__init__() self.layers = nn.ModuleList([OmDetTurboEncoderLayer(config) for _ in range(config.encoder_layers)]) def forward( self, src, src_mask=None, pos_embed=None, output_attentions: bool = False ) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]]]: hidden_states = src attention = () if output_attentions else None for layer in self.layers: hidden_states = layer( hidden_states, attention_mask=src_mask, position_embeddings=pos_embed, output_attentions=output_attentions, ) if output_attentions: attention = attention + (hidden_states[1],) hidden_states = hidden_states[0] return hidden_states, attention class OmDetTurboHybridEncoder(nn.Module): """ Encoder consisting of channel projection layers, a set of `OmDetTurboEncoder`, a top-down Feature Pyramid Network (FPN) and a bottom-up Path Aggregation Network (PAN). 
More details on the paper: https://huggingface.co/papers/2304.08069 Args: config: OmDetTurboConfig """ def __init__(self, config: OmDetTurboConfig): super().__init__() self.config = config self.in_channels = config.encoder_in_channels self.encoder_hidden_dim = config.encoder_hidden_dim self.encoder_projection_indices = config.encoder_projection_indices self.positional_encoding_temperature = config.positional_encoding_temperature self.eval_size = config.eval_size self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels] self.channel_projection_layers = nn.ModuleList() for in_channel in self.in_channels: self.channel_projection_layers.append( nn.Sequential( nn.Conv2d(in_channel, self.encoder_hidden_dim, kernel_size=(1, 1), bias=False), nn.BatchNorm2d(self.encoder_hidden_dim), ) ) # encoder transformer self.encoder = nn.ModuleList([OmDetTurboEncoder(config) for _ in range(len(self.encoder_projection_indices))]) # top-down fpn self.lateral_convs = nn.ModuleList() self.fpn_blocks = nn.ModuleList() for _ in range(len(self.in_channels) - 1, 0, -1): self.lateral_convs.append( OmDetTurboConvNormLayer( config, in_channels=self.encoder_hidden_dim, out_channels=self.encoder_hidden_dim, kernel_size=1, stride=1, activation=config.conv_norm_activation, ) ) self.fpn_blocks.append(OmDetTurboCSPRepLayer(config)) # bottom-up pan self.downsample_convs = nn.ModuleList() self.pan_blocks = nn.ModuleList() for _ in range(len(self.in_channels) - 1): self.downsample_convs.append( OmDetTurboConvNormLayer( config, in_channels=self.encoder_hidden_dim, out_channels=self.encoder_hidden_dim, kernel_size=3, stride=2, activation=config.conv_norm_activation, ) ) self.pan_blocks.append(OmDetTurboCSPRepLayer(config)) @staticmethod def build_2d_sincos_position_embedding( width, height, embed_dim=256, temperature=10000.0, device="cpu", dtype=torch.float32 ): grid_w = torch.arange(int(width), dtype=dtype, device=device) grid_h = torch.arange(int(height), dtype=dtype, device=device) grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij") if embed_dim % 4 != 0: raise ValueError("Embed dimension must be divisible by 4 for 2D sin-cos position embedding") pos_dim = embed_dim // 4 omega = torch.arange(pos_dim, dtype=dtype, device=device) / pos_dim omega = 1.0 / (temperature**omega) out_w = grid_w.flatten()[..., None] @ omega[None] out_h = grid_h.flatten()[..., None] @ omega[None] return torch.concat([out_w.sin(), out_w.cos(), out_h.sin(), out_h.cos()], dim=1)[None, :, :] def forward( self, inputs_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layers) that is passed to the encoder. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeddings encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # get projection features projected_features = [self.channel_projection_layers[i](feature) for i, feature in enumerate(hidden_states)] # encoder for encoder_layer_index, feature_to_project_index in enumerate(self.encoder_projection_indices): if output_hidden_states: encoder_states = encoder_states + (projected_features[feature_to_project_index],) height, width = projected_features[feature_to_project_index].shape[2:] # flatten [batch, channel, height, width] to [batch, height*width, channel] src_flatten = projected_features[feature_to_project_index].flatten(2).permute(0, 2, 1) if self.training or self.eval_size is None: pos_embed = self.build_2d_sincos_position_embedding( width, height, self.encoder_hidden_dim, self.positional_encoding_temperature, device=src_flatten.device, dtype=src_flatten.dtype, ).to(src_flatten.device, src_flatten.dtype) else: pos_embed = None layer_outputs = self.encoder[encoder_layer_index]( src_flatten, pos_embed=pos_embed, output_attentions=output_attentions, ) projected_features[feature_to_project_index] = ( layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous() ) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (projected_features[feature_to_project_index],) # Feature Pyramid Network (FPN) fpn_feature_maps = [projected_features[-1]] for idx in range(len(self.in_channels) - 1, 0, -1): feat_high = fpn_feature_maps[0] feat_low = projected_features[idx - 1] feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx](feat_high) fpn_feature_maps[0] = feat_high upsample_feat = F.interpolate(feat_high, scale_factor=2.0, mode="nearest") fps_map = self.fpn_blocks[len(self.in_channels) - 1 - idx](torch.concat([upsample_feat, feat_low], dim=1)) fpn_feature_maps.insert(0, fps_map) # Path Aggregation Network (PAN) fpn_states = [fpn_feature_maps[0]] for idx in range(len(self.in_channels) - 1): feat_low = fpn_states[-1] feat_high = fpn_feature_maps[idx + 1] downsample_feat = self.downsample_convs[idx](feat_low) hidden_states = self.pan_blocks[idx]( torch.concat([downsample_feat, feat_high.to(downsample_feat.device)], dim=1) ) fpn_states.append(hidden_states) if not return_dict: return (fpn_states[-1], encoder_states, all_attentions, fpn_states) return OmDetTurboEncoderOutput( last_hidden_state=fpn_states[-1], hidden_states=encoder_states, attentions=all_attentions, extracted_states=fpn_states, ) class OmDetTurboMLPWithDropout(nn.Module): def __init__(self, config): super().__init__() self.linear1 = nn.Linear(config.class_embed_dim, config.task_encoder_hidden_dim) self.activation = ACT2FN[config.decoder_activation] self.dropout = nn.Dropout(config.decoder_dropout) self.linear2 = nn.Linear(config.task_encoder_hidden_dim, config.class_embed_dim) def forward(self, x): return self.linear2(self.dropout(self.activation(self.linear1(x)))) class OmDetTurboMLP(nn.Module): """Very simple multi-layer perceptron (also called FFN)""" def __init__(self, input_dim, hidden_dim, output_dim, num_layers): 
super().__init__() self.num_layers = num_layers hidden_layers_dims = [hidden_dim] * (num_layers - 1) layers_dims = [input_dim] + hidden_layers_dims + [output_dim] self.layers = nn.ModuleList( [nn.Linear(in_dim, out_dim) for in_dim, out_dim in zip(layers_dims[:-1], layers_dims[1:])] ) def forward(self, x): for i, layer in enumerate(self.layers): x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class OmDetTurboResidualLayer(nn.Module): """ A residual connection followed by a layer norm. """ def __init__(self, config): super().__init__() self.norm1 = nn.LayerNorm(config.class_embed_dim, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.decoder_dropout) def forward(self, x, y): return self.norm1(x + self.dropout(y)) class OmDetTurboTaskEncoder(nn.Module): def __init__(self, config): super().__init__() self.mlp = OmDetTurboMLPWithDropout(config) self.res1 = OmDetTurboResidualLayer(config) def forward(self, x): mlp_out = self.mlp(x) x = self.res1(x, mlp_out) return x class OmDetTurboDeformableTransformerDecoderLayer(GradientCheckpointingLayer): """ A single layer of the Deformable Transformer Decoder. """ def __init__(self, config): super().__init__() # self attention self.self_attn = OmDetTurboMultiheadAttention( config, hidden_size=config.decoder_hidden_dim, num_attention_heads=config.decoder_num_heads, dropout=config.decoder_dropout, ) self.dropout1 = nn.Dropout(config.decoder_dropout) self.norm1 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps) # cross attention self.cross_attn = OmDetTurboMultiscaleDeformableAttention( config, num_heads=config.decoder_num_heads, n_points=config.decoder_num_points ) self.dropout2 = nn.Dropout(config.decoder_dropout) self.norm2 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps) # feed forward network self.linear1 = nn.Linear(config.decoder_hidden_dim, config.decoder_dim_feedforward) self.act = ACT2FN[config.decoder_activation] self.dropout3 = nn.Dropout(config.decoder_dropout) self.linear2 = nn.Linear(config.decoder_dim_feedforward, config.decoder_hidden_dim) self.dropout4 = nn.Dropout(config.decoder_dropout) self.norm3 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states @staticmethod def with_pos_embed(tensor, pos): return tensor if pos is None else tensor + pos def forward( self, decoder_embeddings, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=None, attention_mask=None, padding_mask=None, query_position=None, output_attentions=None, output_hidden_states=None, ): output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states origin_embedding_len = decoder_embeddings.shape[1] # self attention query = key = self.with_pos_embed(decoder_embeddings, query_position) # combine task_features with query, key, value task_features = task_features.transpose(0, 1) query = torch.cat((query, task_features), dim=1) key = torch.cat((key, task_features), dim=1) decoder_embeddings = torch.cat((decoder_embeddings, task_features), dim=1) outputs = self.self_attn( query, key, decoder_embeddings, attention_mask=attention_mask, output_attentions=output_attentions, ) context, self_attention = outputs if output_attentions else (outputs[0], None) decoder_embeddings = decoder_embeddings + 
self.dropout1(context) decoder_embeddings = self.norm1(decoder_embeddings) task_features = decoder_embeddings[:, origin_embedding_len:, :].transpose(0, 1) decoder_embeddings = decoder_embeddings[:, :origin_embedding_len, :] # cross attention hidden_states = self.with_pos_embed(decoder_embeddings, query_position) reference_points = reference_points.unsqueeze(2) outputs, cross_attention = self.cross_attn( hidden_states=hidden_states, attention_mask=padding_mask, encoder_hidden_states=vision_features, reference_points=reference_points, spatial_shapes=vision_shapes, spatial_shapes_list=vision_shapes_list, level_start_index=level_start_index, ) decoder_embeddings = decoder_embeddings + self.dropout2(outputs) residual = self.norm2(decoder_embeddings) # feed forward network decoder_embeddings = self.linear2(self.dropout3(self.act(self.linear1(residual)))) decoder_embeddings = residual + self.dropout4(decoder_embeddings) decoder_embeddings = self.norm3(decoder_embeddings) return ( decoder_embeddings, task_features, self_attention if output_attentions else None, cross_attention if output_attentions else None, ) @auto_docstring class OmDetTurboPreTrainedModel(PreTrainedModel): config: OmDetTurboConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): def linear_init_(module_to_init): bound = 1 / math.sqrt(module_to_init.weight.shape[0]) nn.init.uniform_(module_to_init.weight, -bound, bound) if hasattr(module_to_init, "bias") and module_to_init.bias is not None: nn.init.uniform_(module_to_init.bias, -bound, bound) if isinstance(module, OmDetTurboEncoderLayer): linear_init_(module.fc1) linear_init_(module.fc2) elif isinstance(module, OmDetTurboDecoder): nn.init.constant_(module.encoder_bbox_head.layers[-1].weight, 0.0) nn.init.constant_(module.encoder_bbox_head.layers[-1].bias, 0.0) for mlp in module.decoder_bbox_head: nn.init.constant_(mlp.layers[-1].weight, 0.0) nn.init.constant_(mlp.layers[-1].bias, 0.0) linear_init_(module.encoder_vision_features[0]) nn.init.xavier_uniform_(module.encoder_vision_features[0].weight) if module.learn_initial_query: nn.init.xavier_uniform_(module.tgt_embed.weight) nn.init.xavier_uniform_(module.query_position_head.layers[0].weight) nn.init.xavier_uniform_(module.query_position_head.layers[1].weight) for layer in module.channel_projection_layers: nn.init.xavier_uniform_(layer[0].weight) elif isinstance(module, OmDetTurboLanguageBackbone): nn.init.normal_(module.text_projection, std=self.config.text_projection_in_dim**-0.5) elif isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)): module.weight.data.fill_(1.0) module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, OmDetTurboDecoder): module.gradient_checkpointing = value @staticmethod def _get_cache_key_at_index(input_ids, attention_mask, index): input_ids = input_ids[index] input_mask = attention_mask[index] cache_key = tuple(input_ids[input_mask != 0].tolist()) return cache_key def get_cached_class_embeddings(self, classes_input_ids, classes_attention_mask): not_cached_index = [] not_cached_classes = [] total_embeddings = [] for idx, _ in enumerate(classes_input_ids): cache_key = self._get_cache_key_at_index(classes_input_ids, classes_attention_mask, idx) if self.language_cache_class.has(cache_key): 
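                # cache hit (explanatory note, not in the original source): reuse the class embedding already
                # computed for this exact token sequence instead of re-running the language backbone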
total_embeddings.append(self.language_cache_class.get(cache_key)) else: total_embeddings.append(None) not_cached_index.append(idx) not_cached_classes.append(cache_key) if not_cached_classes: not_cached_classes_ids = torch.stack([classes_input_ids[idx] for idx in not_cached_index]) embeddings = self.language_backbone(not_cached_classes_ids, encode_type="class") for idx, emb in enumerate(embeddings): idx_to_put = not_cached_index[idx] total_embeddings[idx_to_put] = emb self.language_cache_class.put(not_cached_classes[idx], emb) total_class_embs = torch.stack(total_embeddings).to(self.device) return total_class_embs def get_cached_task_embeddings(self, tasks_input_ids, tasks_attention_mask): not_cached_index = [] not_cached_tasks = [] total_task_features = [] total_task_masks = [] for idx, _ in enumerate(tasks_input_ids): cache_key = self._get_cache_key_at_index(tasks_input_ids, tasks_attention_mask, idx) if self.language_cache_prompt.has(cache_key): task_feature, task_mask = self.language_cache_prompt.get(cache_key) total_task_features.append(task_feature) total_task_masks.append(task_mask) else: total_task_features.append(None) total_task_masks.append(None) not_cached_index.append(idx) not_cached_tasks.append(cache_key) if not_cached_tasks: not_cached_index_ids = torch.stack([tasks_input_ids[idx] for idx in not_cached_index]) not_cached_mask = torch.stack([tasks_attention_mask[idx] for idx in not_cached_index]) embeddings, masks = self.language_backbone(not_cached_index_ids, mask=not_cached_mask, encode_type="task") for idx in range(embeddings.shape[1]): emb = embeddings[:, [idx], :] idx_to_put = not_cached_index[idx] cur_mask = torch.unsqueeze(masks[idx], dim=0).to(self.device) total_task_features[idx_to_put] = emb total_task_masks[idx_to_put] = cur_mask self.language_cache_prompt.put(not_cached_tasks[idx], (emb, cur_mask)) # pad before concat if needed max_len = max([task.shape[0] for task in total_task_features]) for idx, task in enumerate(total_task_features): if task.shape[0] < max_len: pad_size = max_len - task.shape[0] total_task_features[idx] = F.pad(task, (0, 0, 0, 0, 0, pad_size)) total_task_masks[idx] = F.pad(total_task_masks[idx], (0, pad_size)) total_task_features = torch.cat(total_task_features, dim=1).to(self.device) total_task_masks = torch.cat(total_task_masks, dim=0).to(self.device) return total_task_features, total_task_masks def get_language_embedding( self, classes_input_ids, classes_attention_mask, tasks_input_ids, tasks_attention_mask, classes_structure, ): batched_classes_embeddings = self.get_cached_class_embeddings(classes_input_ids, classes_attention_mask) # regroup class embeddings using saved structure max_class_size = torch.max(classes_structure) class_embeddings_regrouped = [] start = 0 for size in classes_structure: pad_size = max_class_size - size class_embeddings_regrouped.append( F.pad(batched_classes_embeddings[start : start + size], (0, 0, 0, pad_size)).unsqueeze(1) ) start += size class_embeddings = torch.cat(class_embeddings_regrouped, dim=1) task_embeddings, task_mask = self.get_cached_task_embeddings(tasks_input_ids, tasks_attention_mask) return class_embeddings, task_embeddings, task_mask def _cosine_similarity_scaled(a, b, logit_scale): a = a / a.norm(dim=2, keepdim=True).clamp_min(1e-12) b = b / b.norm(dim=1, keepdim=True).clamp_min(1e-12) logit_scale = logit_scale.exp() logits_per_image = logit_scale * torch.bmm(a, b) return logits_per_image def get_class_similarity(class_distance_type, cls_feature, class_proj): logit_scale = torch.tensor(1 / 
0.07).log() if class_distance_type == "cosine": class_logits = _cosine_similarity_scaled(cls_feature, class_proj, logit_scale) elif class_distance_type == "dot": class_logits = torch.bmm(cls_feature, class_proj) else: raise Exception(f"Unknown class_distance_type {class_distance_type}") return class_logits def _inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) class OmDetTurboDecoder(OmDetTurboPreTrainedModel): def __init__(self, config: OmDetTurboConfig): self.config = config super().__init__(config) self.gradient_checkpointing = False hidden_dim = config.decoder_hidden_dim self.num_queries = config.num_queries self.class_distance_type = config.class_distance_type self.learn_initial_query = config.learn_initial_query # backbone feature projection self.channel_projection_layers = nn.ModuleList( nn.Sequential(nn.Conv2d(x, hidden_dim, 1, bias=False), nn.BatchNorm2d(hidden_dim)) for x in config.vision_features_channels ) self.task_encoder = OmDetTurboTaskEncoder(config) if config.class_embed_dim != hidden_dim: self.task_project = nn.Linear(config.class_embed_dim, hidden_dim) # Transformer module self.layers = nn.ModuleList( [OmDetTurboDeformableTransformerDecoderLayer(config) for _ in range(config.decoder_num_layers)] ) self.decoder_num_layers = config.decoder_num_layers # decoder embedding if self.learn_initial_query: self.tgt_embed = nn.Embedding(self.num_queries, hidden_dim) self.query_position_head = OmDetTurboMLP( input_dim=4, hidden_dim=2 * hidden_dim, output_dim=hidden_dim, num_layers=2 ) # encoder head self.encoder_vision_features = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.LayerNorm(hidden_dim, eps=config.layer_norm_eps) ) self.encoder_class_head = nn.Linear(config.class_embed_dim, hidden_dim) self.encoder_bbox_head = OmDetTurboMLP(input_dim=hidden_dim, hidden_dim=hidden_dim, output_dim=4, num_layers=3) # decoder head self.decoder_class_head = nn.ModuleList( [nn.Linear(config.class_embed_dim, hidden_dim) for _ in range(config.decoder_num_layers)] ) self.decoder_bbox_head = nn.ModuleList( [OmDetTurboMLP(hidden_dim, hidden_dim, 4, num_layers=3) for _ in range(config.decoder_num_layers)] ) # Initialize weights and apply final processing self.post_init() @lru_cache(maxsize=32) def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device="cpu", dtype=torch.float32): # We always generate anchors in float32 to preserve equivalence between # dynamic and static anchor inference # Ignore copy if spatial_shapes is None: raise ValueError("spatial_shapes must be provided") anchors = [] for level, (height, width) in enumerate(spatial_shapes): grid_y, grid_x = torch.meshgrid( torch.arange(end=height, dtype=dtype, device=device), torch.arange(end=width, dtype=dtype, device=device), indexing="ij", ) grid_xy = torch.stack([grid_x, grid_y], -1) valid_wh = torch.tensor([width, height], dtype=dtype, device=device) grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_wh wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**level) anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4)) # define the valid range for anchor coordinates eps = 1e-2 anchors = torch.concat(anchors, 1) valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True) anchors = torch.log(anchors / (1 - anchors)) anchors = torch.where(valid_mask, anchors, torch.inf) return anchors, valid_mask def _get_encoder_input(self, vision_features): # get projection features vision_features = 
[self.channel_projection_layers[i](feat) for i, feat in enumerate(vision_features)] # get encoder inputs new_vision_features = [] new_vision_shapes_list = [] for feat in vision_features: height, width = feat.shape[2:] # [batch_size, channels, height, width] -> [batch_size, height*width, channels] new_vision_features.append(feat.flatten(2).permute(0, 2, 1)) # [num_feature_levels, 2] new_vision_shapes_list.append((height, width)) # [batch_size, height*width, channels] new_vision_features = torch.cat(new_vision_features, 1) new_vision_shapes = torch.tensor(new_vision_shapes_list, dtype=torch.int64, device=vision_features[0].device) level_start_index = torch.cat((new_vision_shapes.new_zeros((1,)), new_vision_shapes.prod(1).cumsum(0)[:-1])) return new_vision_features, new_vision_shapes, new_vision_shapes_list, level_start_index def _get_decoder_input( self, vision_features, vision_shapes, class_features, denoise_embeddings=None, denoise_bboxes=None ): batch_size = len(vision_features) # prepare input for decoder anchors, valid_mask = self.generate_anchors( vision_shapes, device=vision_features.device, dtype=vision_features.dtype ) predicted_class_features = self.encoder_vision_features( torch.where( valid_mask, vision_features, torch.tensor(0.0, dtype=vision_features.dtype, device=vision_features.device), ) ) original_class_projected = self.encoder_class_head(class_features).permute(1, 2, 0) encoder_class_similarity = get_class_similarity( self.class_distance_type, predicted_class_features, original_class_projected ) # dynamic anchors + static content # (batch_size, height*width, 4) encoder_outputs_bboxes = self.encoder_bbox_head(predicted_class_features) + anchors # query selection # (batch_size, num_queries) topk_ind = torch.topk(encoder_class_similarity.max(-1).values, self.num_queries, dim=1).indices.view(-1) # (batch_size, num_queries) batch_ind = ( torch.arange(end=batch_size, dtype=topk_ind.dtype, device=topk_ind.device) .unsqueeze(-1) .repeat(1, self.num_queries) .view(-1) ) reference_points = encoder_outputs_bboxes[batch_ind, topk_ind].view(batch_size, self.num_queries, -1) encoder_bboxes = reference_points.sigmoid() if denoise_bboxes is not None: reference_points = torch.cat([denoise_bboxes, reference_points], 1) if self.training: reference_points = reference_points.detach() encoder_class_similarity = encoder_class_similarity[batch_ind, topk_ind].view(batch_size, self.num_queries, -1) if self.learn_initial_query: embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(batch_size, 1, 1) else: embeddings = predicted_class_features[batch_ind, topk_ind].view(batch_size, self.num_queries, -1) if self.training: embeddings = embeddings.detach() if denoise_embeddings is not None: embeddings = torch.cat([denoise_embeddings, embeddings], 1) return embeddings, reference_points, encoder_bboxes, encoder_class_similarity, anchors def forward( self, vision_features, class_features, task_features, task_mask, output_attentions=None, output_hidden_states=None, return_dict=None, ): """ Args: vision_features (`torch.FloatTensor`): The sequence of vision features. shape depends on the vision backbone. class_features (`torch.FloatTensor`): The sequence of class features of shape `(class_sequence_length, batch_size, class_embed_dim)`. task_features (`torch.FloatTensor`): The sequence of task features of shape `(task_sequence_length, batch_size, decoder_hidden_dim)`. task_mask (`torch.LongTensor`): The mask for the task features of shape `(batch_size, task_sequence_length)`. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_features, vision_shapes, vision_shapes_list, level_start_index = self._get_encoder_input( vision_features ) # todo add denoising for training denoise_embeddings, denoise_bboxes, key_padding_mask = None, None, None batch_size = task_mask.shape[0] # compose attn_mask for vision_emb and task_emb fusion task_features = self.task_encoder(task_features) if self.task_project is not None: task_features = self.task_project(task_features) src_key_mask = (task_mask == 0).detach() attn_mask_len = self.num_queries fusion_size = attn_mask_len + task_features.shape[0] key_padding_mask = torch.zeros([batch_size, fusion_size], dtype=torch.bool).to(task_features.device) key_padding_mask[:, attn_mask_len:] = src_key_mask attention_mask = _prepare_4d_attention_mask(~key_padding_mask, dtype=vision_features.dtype) decoder_embeddings, reference_points, encoder_bboxes, encoder_class_similarity, init_reference_points = ( self._get_decoder_input( vision_features, tuple(vision_shapes_list), class_features, denoise_embeddings, denoise_bboxes ) ) all_hidden_states = () if output_hidden_states else None all_attns = () if output_attentions else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions else None predicted_class_features = decoder_embeddings if output_hidden_states: all_hidden_states = all_hidden_states + (predicted_class_features,) decoder_bboxes = [] decoder_classes = [] last_refined_bbox = None reference_points = reference_points.sigmoid() for i, layer in enumerate(self.layers): predicted_class_features, task_features, self_attention, cross_attention = layer( predicted_class_features, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=level_start_index, attention_mask=attention_mask, query_position=self.query_position_head(reference_points), output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if output_attentions: all_self_attns = all_self_attns + (self_attention,) all_cross_attns = all_cross_attns + (cross_attention,) if output_hidden_states: all_hidden_states = all_hidden_states + (predicted_class_features,) refined_bbox = torch.sigmoid( self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(reference_points) ) original_class_projected = self.decoder_class_head[i](class_features).permute(1, 2, 0) if self.training: decoder_classes.append( get_class_similarity( class_distance_type=self.class_distance_type, cls_feature=predicted_class_features, class_proj=original_class_projected, ) ) if i == 0: decoder_bboxes.append(refined_bbox) else: decoder_bboxes.append( torch.sigmoid( self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(last_refined_bbox) ) ) elif i == self.decoder_num_layers - 1: 
decoder_classes.append( get_class_similarity(self.class_distance_type, predicted_class_features, original_class_projected) ) decoder_bboxes.append(refined_bbox) break last_refined_bbox = refined_bbox reference_points = refined_bbox.detach() if self.training else refined_bbox if output_attentions: all_attns += (all_self_attns, all_cross_attns) last_hidden_state = predicted_class_features decoder_bboxes = torch.stack(decoder_bboxes) decoder_classes = torch.stack(decoder_classes) if not return_dict: return ( last_hidden_state, all_hidden_states, all_attns, decoder_bboxes, decoder_classes, encoder_bboxes, encoder_class_similarity, init_reference_points, reference_points, ) return OmDetTurboDecoderOutput( last_hidden_state=last_hidden_state, hidden_states=all_hidden_states, attentions=all_attns, decoder_coords=decoder_bboxes, decoder_classes=decoder_classes, encoder_coord_logits=encoder_bboxes, encoder_class_logits=encoder_class_similarity, init_reference_points=init_reference_points, intermediate_reference_points=reference_points, ) @auto_docstring( custom_intro=""" OmDetTurbo Model (consisting of a vision and a text backbone, and encoder-decoder architecture) outputting bounding boxes and classes scores for tasks such as COCO detection. """ ) class OmDetTurboForObjectDetection(OmDetTurboPreTrainedModel): def __init__(self, config: OmDetTurboConfig): super().__init__(config) self.vision_backbone = OmDetTurboVisionBackbone(config) self.language_backbone = OmDetTurboLanguageBackbone(config) self.encoder = OmDetTurboHybridEncoder(config) self.decoder = OmDetTurboDecoder(config) self.num_queries = config.num_queries self.language_cache_class = OmDetTurboLRUCache(config.cache_size) self.language_cache_prompt = OmDetTurboLRUCache(config.cache_size) self.vocab_size = config.text_config.vocab_size self.post_init() def get_input_embeddings(self): return self.language_backbone.model.get_input_embeddings() def set_input_embeddings(self, value): self.language_backbone.model.set_input_embeddings(value) def resize_token_embeddings( self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None, mean_resizing: bool = True ) -> nn.Embedding: model_embeds = self.language_backbone.model.resize_token_embeddings( new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of, mean_resizing=mean_resizing ) self.config.text_config.vocab_size = model_embeds.num_embeddings self.vocab_size = model_embeds.num_embeddings return model_embeds @auto_docstring def forward( self, pixel_values: torch.FloatTensor, classes_input_ids: torch.LongTensor, classes_attention_mask: torch.LongTensor, tasks_input_ids: torch.LongTensor, tasks_attention_mask: torch.LongTensor, classes_structure: torch.LongTensor, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], OmDetTurboObjectDetectionOutput]: r""" classes_input_ids (`torch.LongTensor` of shape `(total_classes (>= batch_size), sequence_length)`): Indices of input classes sequence tokens in the vocabulary of the language model. Several classes can be provided for each tasks, thus the tokenized classes are flattened and the structure of the classes is provided in the `classes_structure` argument. Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for details. 
        [What are input IDs?](../glossary#input-ids)
        classes_attention_mask (`torch.BoolTensor` of shape `(total_classes (>= batch_size), num_classes, sequence_length)`):
            Attention mask for the classes. This is a binary mask that indicates which tokens should be attended to,
            and which should not.
        tasks_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input tasks sequence tokens in the vocabulary of the language model. Indices can be obtained
            using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        tasks_attention_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Attention mask for the tasks. This is a binary mask that indicates which tokens should be attended to, and
            which should not.
        classes_structure (`torch.LongTensor` of shape `(batch_size)`):
            Structure of the classes. This tensor indicates the number of classes for each task.

        Examples:

        ```python
        >>> import requests
        >>> from PIL import Image
        >>> from transformers import AutoProcessor, OmDetTurboForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
        >>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> classes = ["cat", "remote"]
        >>> task = "Detect {}.".format(", ".join(classes))
        >>> inputs = processor(image, text=classes, task=task, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> # convert outputs (bounding boxes and class logits)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs,
        ...     classes=classes,
        ...     target_sizes=[image.size[::-1]],
        ...     score_threshold=0.3,
        ...     nms_threshold=0.3,
        ... )[0]
        >>> for score, class_name, box in zip(results["scores"], results["classes"], results["boxes"]):
        ...     box = [round(i, 1) for i in box.tolist()]
        ...     print(
        ...         f"Detected {class_name} with confidence "
        ...         f"{round(score.item(), 2)} at location {box}"
        ...     
) Detected remote with confidence 0.76 at location [39.9, 71.3, 176.5, 117.9] Detected cat with confidence 0.72 at location [345.1, 22.5, 639.7, 371.9] Detected cat with confidence 0.65 at location [12.7, 53.8, 315.5, 475.3] Detected remote with confidence 0.57 at location [333.4, 75.6, 370.7, 187.0] ```""" if labels is not None: raise NotImplementedError("Training is not implemented yet") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None image_features = self.vision_backbone(pixel_values) encoder_outputs = self.encoder( image_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class_features, task_features, task_mask = self.get_language_embedding( classes_input_ids, classes_attention_mask, tasks_input_ids, tasks_attention_mask, classes_structure, ) encoder_extracted_states = encoder_outputs.extracted_states if return_dict else encoder_outputs[-1] decoder_outputs = self.decoder( encoder_extracted_states, class_features, task_features, task_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return tuple( output for output in [ loss, decoder_outputs[3][-1], decoder_outputs[4][-1], decoder_outputs[7], decoder_outputs[8], decoder_outputs[5], decoder_outputs[6], encoder_outputs[-1], decoder_outputs[1], decoder_outputs[2], encoder_outputs[1], encoder_outputs[2], classes_structure, ] if output is not None ) return OmDetTurboObjectDetectionOutput( loss=loss, decoder_coord_logits=decoder_outputs.decoder_coords[-1], decoder_class_logits=decoder_outputs.decoder_classes[-1], init_reference_points=decoder_outputs.init_reference_points, intermediate_reference_points=decoder_outputs.intermediate_reference_points, encoder_coord_logits=decoder_outputs.encoder_coord_logits, encoder_class_logits=decoder_outputs.encoder_class_logits, encoder_extracted_states=encoder_outputs.extracted_states, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, classes_structure=classes_structure, ) __all__ = ["OmDetTurboForObjectDetection", "OmDetTurboPreTrainedModel"]
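

if __name__ == "__main__":
    # Minimal sanity sketch for the scoring helpers defined above, using toy
    # shapes only (nothing here depends on a checkpoint or on real data). It
    # exercises the scaled cosine class-similarity and checks that
    # `_inverse_sigmoid` inverts `torch.sigmoid` away from the clamp region.
    # Shapes follow the decoder's conventions: `cls_feature` is
    # (batch_size, num_queries, dim) and `class_proj` is (batch_size, dim, num_classes).
    batch_size, num_queries, dim, num_classes = 2, 4, 8, 3
    cls_feature = torch.randn(batch_size, num_queries, dim)
    class_proj = torch.randn(batch_size, dim, num_classes)
    class_logits = get_class_similarity("cosine", cls_feature, class_proj)
    assert class_logits.shape == (batch_size, num_queries, num_classes)

    boxes = torch.rand(batch_size, num_queries, 4)
    assert torch.allclose(torch.sigmoid(_inverse_sigmoid(boxes)), boxes, atol=1e-4)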
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OWLv2 checkpoints from the original repository.

URL: https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit"""

import argparse
import collections
import os

import jax
import jax.numpy as jnp
import numpy as np
import torch
from flax.training import checkpoints
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    CLIPTokenizer,
    Owlv2Config,
    Owlv2ForObjectDetection,
    Owlv2ImageProcessor,
    Owlv2Processor,
    Owlv2TextConfig,
    Owlv2VisionConfig,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_owlv2_config(model_name):
    if "large" in model_name:
        image_size = 1008
        patch_size = 14
        vision_hidden_size = 1024
        vision_intermediate_size = 4096
        vision_num_hidden_layers = 24
        vision_num_attention_heads = 16
        projection_dim = 768
        text_hidden_size = 768
        text_intermediate_size = 3072
        text_num_attention_heads = 12
        text_num_hidden_layers = 12
    else:
        image_size = 960
        patch_size = 16
        vision_hidden_size = 768
        vision_intermediate_size = 3072
        vision_num_hidden_layers = 12
        vision_num_attention_heads = 12
        projection_dim = 512
        text_hidden_size = 512
        text_intermediate_size = 2048
        text_num_attention_heads = 8
        text_num_hidden_layers = 12

    vision_config = Owlv2VisionConfig(
        patch_size=patch_size,
        image_size=image_size,
        hidden_size=vision_hidden_size,
        num_hidden_layers=vision_num_hidden_layers,
        intermediate_size=vision_intermediate_size,
        num_attention_heads=vision_num_attention_heads,
    )
    text_config = Owlv2TextConfig(
        hidden_size=text_hidden_size,
        intermediate_size=text_intermediate_size,
        num_attention_heads=text_num_attention_heads,
        num_hidden_layers=text_num_hidden_layers,
    )

    config = Owlv2Config(
        text_config=text_config.to_dict(),
        vision_config=vision_config.to_dict(),
        projection_dim=projection_dim,
    )

    return config


def flatten_nested_dict(params, parent_key="", sep="/"):
    items = []

    for k, v in params.items():
        new_key = parent_key + sep + k if parent_key else k

        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_nested_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, model_name):
    rename_keys = []

    # fmt: off
    # CLIP vision encoder
    rename_keys.append(("backbone/clip/visual/class_embedding", "owlv2.vision_model.embeddings.class_embedding"))
    rename_keys.append(("backbone/clip/visual/conv1/kernel", "owlv2.vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("backbone/clip/visual/positional_embedding", "owlv2.vision_model.embeddings.position_embedding.weight"))
    rename_keys.append(("backbone/clip/visual/ln_pre/scale", "owlv2.vision_model.pre_layernorm.weight"))
    rename_keys.append(("backbone/clip/visual/ln_pre/bias", "owlv2.vision_model.pre_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        # v2 checkpoints name the pre-attention/pre-MLP layer norms ln_0/ln_1,
        # whereas v1 checkpoints use ln_1/ln_2
        if "v2" in 
model_name: rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_0/scale", f"owlv2.vision_model.encoder.layers.{i}.layer_norm1.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_0/bias", f"owlv2.vision_model.encoder.layers.{i}.layer_norm1.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_1/scale", f"owlv2.vision_model.encoder.layers.{i}.layer_norm2.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_1/bias", f"owlv2.vision_model.encoder.layers.{i}.layer_norm2.bias")) else: rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_1/scale", f"owlv2.vision_model.encoder.layers.{i}.layer_norm1.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_1/bias", f"owlv2.vision_model.encoder.layers.{i}.layer_norm1.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_2/scale", f"owlv2.vision_model.encoder.layers.{i}.layer_norm2.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/ln_2/bias", f"owlv2.vision_model.encoder.layers.{i}.layer_norm2.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/mlp/c_fc/kernel", f"owlv2.vision_model.encoder.layers.{i}.mlp.fc1.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/mlp/c_fc/bias", f"owlv2.vision_model.encoder.layers.{i}.mlp.fc1.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/mlp/c_proj/kernel", f"owlv2.vision_model.encoder.layers.{i}.mlp.fc2.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/mlp/c_proj/bias", f"owlv2.vision_model.encoder.layers.{i}.mlp.fc2.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/query/kernel", f"owlv2.vision_model.encoder.layers.{i}.self_attn.q_proj.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/query/bias", f"owlv2.vision_model.encoder.layers.{i}.self_attn.q_proj.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/key/kernel", f"owlv2.vision_model.encoder.layers.{i}.self_attn.k_proj.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/key/bias", f"owlv2.vision_model.encoder.layers.{i}.self_attn.k_proj.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/value/kernel", f"owlv2.vision_model.encoder.layers.{i}.self_attn.v_proj.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/value/bias", f"owlv2.vision_model.encoder.layers.{i}.self_attn.v_proj.bias")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/out/kernel", f"owlv2.vision_model.encoder.layers.{i}.self_attn.out_proj.weight")) rename_keys.append((f"backbone/clip/visual/transformer/resblocks.{i}/attn/out/bias", f"owlv2.vision_model.encoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append(("backbone/clip/visual/ln_post/scale", "owlv2.vision_model.post_layernorm.weight")) rename_keys.append(("backbone/clip/visual/ln_post/bias", "owlv2.vision_model.post_layernorm.bias")) # CLIP text encoder rename_keys.append(("backbone/clip/text/token_embedding/embedding", "owlv2.text_model.embeddings.token_embedding.weight")) rename_keys.append(("backbone/clip/text/positional_embedding", "owlv2.text_model.embeddings.position_embedding.weight")) for i in range(config.text_config.num_hidden_layers): if "v2" in model_name: 
rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_0/scale", f"owlv2.text_model.encoder.layers.{i}.layer_norm1.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_0/bias", f"owlv2.text_model.encoder.layers.{i}.layer_norm1.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_1/scale", f"owlv2.text_model.encoder.layers.{i}.layer_norm2.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_1/bias", f"owlv2.text_model.encoder.layers.{i}.layer_norm2.bias")) else: rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_1/scale", f"owlv2.text_model.encoder.layers.{i}.layer_norm1.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_1/bias", f"owlv2.text_model.encoder.layers.{i}.layer_norm1.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_2/scale", f"owlv2.text_model.encoder.layers.{i}.layer_norm2.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/ln_2/bias", f"owlv2.text_model.encoder.layers.{i}.layer_norm2.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/mlp/c_fc/kernel", f"owlv2.text_model.encoder.layers.{i}.mlp.fc1.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/mlp/c_fc/bias", f"owlv2.text_model.encoder.layers.{i}.mlp.fc1.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/mlp/c_proj/kernel", f"owlv2.text_model.encoder.layers.{i}.mlp.fc2.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/mlp/c_proj/bias", f"owlv2.text_model.encoder.layers.{i}.mlp.fc2.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/query/kernel", f"owlv2.text_model.encoder.layers.{i}.self_attn.q_proj.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/query/bias", f"owlv2.text_model.encoder.layers.{i}.self_attn.q_proj.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/key/kernel", f"owlv2.text_model.encoder.layers.{i}.self_attn.k_proj.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/key/bias", f"owlv2.text_model.encoder.layers.{i}.self_attn.k_proj.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/value/kernel", f"owlv2.text_model.encoder.layers.{i}.self_attn.v_proj.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/value/bias", f"owlv2.text_model.encoder.layers.{i}.self_attn.v_proj.bias")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/out/kernel", f"owlv2.text_model.encoder.layers.{i}.self_attn.out_proj.weight")) rename_keys.append((f"backbone/clip/text/transformer/resblocks.{i}/attn/out/bias", f"owlv2.text_model.encoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append(("backbone/clip/text/ln_final/scale", "owlv2.text_model.final_layer_norm.weight")) rename_keys.append(("backbone/clip/text/ln_final/bias", "owlv2.text_model.final_layer_norm.bias")) # logit scale rename_keys.append(("backbone/clip/logit_scale", "owlv2.logit_scale")) # projection heads rename_keys.append(("backbone/clip/text/text_projection/kernel", "owlv2.text_projection.weight")) # class and box heads rename_keys.append(("backbone/merged_class_token/scale", "layer_norm.weight")) rename_keys.append(("backbone/merged_class_token/bias", "layer_norm.bias")) rename_keys.append(("class_head/Dense_0/kernel", "class_head.dense0.weight")) 
rename_keys.append(("class_head/Dense_0/bias", "class_head.dense0.bias")) rename_keys.append(("class_head/logit_shift/kernel", "class_head.logit_shift.weight")) rename_keys.append(("class_head/logit_scale/kernel", "class_head.logit_scale.weight")) rename_keys.append(("class_head/logit_scale/bias", "class_head.logit_scale.bias")) rename_keys.append(("class_head/logit_shift/bias", "class_head.logit_shift.bias")) rename_keys.append(("obj_box_head/Dense_0/kernel", "box_head.dense0.weight")) rename_keys.append(("obj_box_head/Dense_0/bias", "box_head.dense0.bias")) rename_keys.append(("obj_box_head/Dense_1/kernel", "box_head.dense1.weight")) rename_keys.append(("obj_box_head/Dense_1/bias", "box_head.dense1.bias")) rename_keys.append(("obj_box_head/Dense_2/kernel", "box_head.dense2.weight")) rename_keys.append(("obj_box_head/Dense_2/bias", "box_head.dense2.bias")) # objectness head (only for v2) if "v2" in model_name: rename_keys.append(("objectness_head/Dense_0/kernel", "objectness_head.dense0.weight")) rename_keys.append(("objectness_head/Dense_0/bias", "objectness_head.dense0.bias")) rename_keys.append(("objectness_head/Dense_1/kernel", "objectness_head.dense1.weight")) rename_keys.append(("objectness_head/Dense_1/bias", "objectness_head.dense1.bias")) rename_keys.append(("objectness_head/Dense_2/kernel", "objectness_head.dense2.weight")) rename_keys.append(("objectness_head/Dense_2/bias", "objectness_head.dense2.bias")) # fmt: on return rename_keys def rename_and_reshape_key(dct, old, new, config): val = dct.pop(old) if ("out_proj" in new or "v_proj" in new or "k_proj" in new or "q_proj" in new) and "vision" in new: val = val.reshape(-1, config.vision_config.hidden_size) if ("out_proj" in new or "v_proj" in new or "k_proj" in new or "q_proj" in new) and "text" in new: val = val.reshape(-1, config.text_config.hidden_size) if "patch_embedding" in new: print("Reshaping patch embedding... for", new) val = val.transpose(3, 2, 0, 1) elif new.endswith("weight") and "position_embedding" not in new and "token_embedding" not in new: val = val.T if new.endswith("bias"): val = val.reshape(-1) dct[new] = torch.from_numpy(np.array(val)) @torch.no_grad() def convert_owlv2_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub, verify_logits): """ Copy/paste/tweak model's weights to our OWL-ViT structure. 
""" config = get_owlv2_config(model_name) # see available checkpoints at https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit#pretrained-checkpoints variables = checkpoints.restore_checkpoint(checkpoint_path, target=None) variables = variables["params"] if "v2" in model_name else variables["optimizer"]["target"] flax_params = jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, variables) state_dict = flatten_nested_dict(flax_params) # Rename keys rename_keys = create_rename_keys(config, model_name) for src, dest in rename_keys: rename_and_reshape_key(state_dict, src, dest, config) # load HuggingFace model model = Owlv2ForObjectDetection(config) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert missing_keys == ["owlv2.visual_projection.weight"] assert unexpected_keys == [] model.eval() # Initialize image processor size = {"height": config.vision_config.image_size, "width": config.vision_config.image_size} image_processor = Owlv2ImageProcessor(size=size) # Initialize tokenizer tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32", pad_token="!", model_max_length=16) # Initialize processor processor = Owlv2Processor(image_processor=image_processor, tokenizer=tokenizer) # Verify pixel_values and input_ids filepath = hf_hub_download(repo_id="nielsr/test-image", filename="owlvit_pixel_values_960.pt", repo_type="dataset") original_pixel_values = torch.load(filepath, weights_only=True).permute(0, 3, 1, 2) filepath = hf_hub_download(repo_id="nielsr/test-image", filename="owlv2_input_ids.pt", repo_type="dataset") original_input_ids = torch.load(filepath, weights_only=True).squeeze() filepath = hf_hub_download(repo_id="adirik/OWL-ViT", repo_type="space", filename="assets/astronaut.png") image = Image.open(filepath) texts = [["face", "rocket", "nasa badge", "star-spangled banner"]] inputs = processor(text=texts, images=image, return_tensors="pt") if "large" not in model_name: assert torch.allclose(inputs.pixel_values, original_pixel_values.float(), atol=1e-6) assert torch.allclose(inputs.input_ids[:4, :], original_input_ids[:4, :], atol=1e-6) with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits pred_boxes = outputs.pred_boxes objectness_logits = outputs.objectness_logits if verify_logits: if model_name == "owlv2-base-patch16": expected_logits = torch.tensor( [[-10.0043, -9.0226, -8.0433], [-12.4569, -14.0380, -12.6153], [-21.0731, -22.2705, -21.8850]] ) expected_boxes = torch.tensor( [[0.0136, 0.0223, 0.0269], [0.0406, 0.0327, 0.0797], [0.0638, 0.1539, 0.1255]] ) expected_objectness_logits = torch.tensor( [[-5.6589, -7.7702, -16.3965]], ) elif model_name == "owlv2-base-patch16-finetuned": expected_logits = torch.tensor( [[-9.2391, -9.2313, -8.0295], [-14.5498, -16.8450, -14.7166], [-15.1278, -17.3060, -15.7169]], ) expected_boxes = torch.tensor( [[0.0103, 0.0094, 0.0207], [0.0483, 0.0729, 0.1013], [0.0629, 0.1396, 0.1313]] ) expected_objectness_logits = torch.tensor( [[-6.5234, -13.3788, -14.6627]], ) elif model_name == "owlv2-base-patch16-ensemble": expected_logits = torch.tensor( [[-8.6353, -9.5409, -6.6154], [-7.9442, -9.6151, -6.7117], [-12.4593, -15.3332, -12.1048]] ) expected_boxes = torch.tensor( [[0.0126, 0.0090, 0.0238], [0.0387, 0.0227, 0.0754], [0.0582, 0.1058, 0.1139]] ) expected_objectness_logits = torch.tensor( [[-6.0628, -5.9507, -10.4486]], ) elif model_name == "owlv2-large-patch14": expected_logits = torch.tensor( [[-12.6662, -11.8384, -12.1880], 
[-16.0599, -16.5835, -16.9364], [-21.4957, -26.7038, -25.1313]],
            )
            expected_boxes = torch.tensor(
                [[0.0136, 0.0161, 0.0256], [0.0126, 0.0135, 0.0202], [0.0498, 0.0948, 0.0915]],
            )
            expected_objectness_logits = torch.tensor(
                [[-6.7196, -9.4590, -13.9472]],
            )
        elif model_name == "owlv2-large-patch14-finetuned":
            expected_logits = torch.tensor(
                [[-9.5413, -9.7130, -7.9762], [-9.5731, -9.7277, -8.2252], [-15.4434, -19.3084, -16.5490]],
            )
            expected_boxes = torch.tensor(
                [[0.0089, 0.0080, 0.0175], [0.0112, 0.0098, 0.0179], [0.0375, 0.0821, 0.0528]],
            )
            expected_objectness_logits = torch.tensor(
                [[-6.2655, -6.5845, -11.3105]],
            )
        elif model_name == "owlv2-large-patch14-ensemble":
            expected_logits = torch.tensor(
                [[-12.2037, -12.2070, -11.5371], [-13.4875, -13.8235, -13.1586], [-18.2007, -22.9834, -20.6816]],
            )
            expected_boxes = torch.tensor(
                [[0.0126, 0.0127, 0.0222], [0.0107, 0.0113, 0.0164], [0.0482, 0.1162, 0.0885]],
            )
            expected_objectness_logits = torch.tensor(
                [[-7.7572, -8.3637, -13.0334]],
            )

        print("Objectness logits:", objectness_logits[:3, :3])
        print("Logits:", logits[0, :3, :3])
        print("Pred boxes:", pred_boxes[0, :3, :3])

        assert torch.allclose(logits[0, :3, :3], expected_logits, atol=1e-3)
        assert torch.allclose(pred_boxes[0, :3, :3], expected_boxes, atol=1e-3)
        assert torch.allclose(objectness_logits[:3, :3], expected_objectness_logits, atol=1e-3)
        print("Looks ok!")
    else:
        print("Model converted without verifying logits")

    if pytorch_dump_folder_path is not None:
        print("Saving model and processor locally...")
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing {model_name} to the hub...")
        model.push_to_hub(f"google/{model_name}")
        processor.push_to_hub(f"google/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="owlv2-base-patch16",
        choices=[
            "owlv2-base-patch16",
            "owlv2-base-patch16-finetuned",
            "owlv2-base-patch16-ensemble",
            "owlv2-large-patch14",
            "owlv2-large-patch14-finetuned",
            "owlv2-large-patch14-ensemble",
        ],
        type=str,
        help="Name of the Owlv2 model you'd like to convert from FLAX to PyTorch.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the original Flax checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--verify_logits",
        action="store_false",
        required=False,
        help="Whether to verify the converted model's logits against reference values (on by default; pass this flag to disable).",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")

    args = parser.parse_args()
    convert_owlv2_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits
    )
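

def _flax_to_torch_layout_demo():
    # Illustrative sketch only (not called by the conversion flow; all shapes
    # are made up): the layout rules that `rename_and_reshape_key` relies on.
    # Flax stores Dense kernels as (in_features, out_features) and conv
    # kernels as (height, width, in_channels, out_channels), whereas
    # `torch.nn.Linear` expects (out_features, in_features) and
    # `torch.nn.Conv2d` expects (out_channels, in_channels, height, width).
    dense_kernel = np.zeros((512, 768))  # Flax: (in_features, out_features)
    linear_weight = dense_kernel.T  # torch: (out_features, in_features)
    conv_kernel = np.zeros((16, 16, 3, 768))  # Flax: (h, w, in, out)
    conv_weight = conv_kernel.transpose(3, 2, 0, 1)  # torch: (out, in, h, w)
    return linear_weight.shape, conv_weight.shape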
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert PaliGemma2 checkpoints from the original repository.""" import argparse import collections import jax.numpy as jnp import ml_dtypes import numpy as np import torch from transformers import ( AutoTokenizer, Gemma2Config, PaliGemmaConfig, PaliGemmaForConditionalGeneration, PaliGemmaProcessor, SiglipImageProcessor, ) from transformers.tokenization_utils_base import AddedToken from transformers.utils import logging device = "cpu" logging.set_verbosity_info() logger = logging.get_logger(__name__) # TODO add sequence length variations here PALIGEMMA2_VARIANTS = ["2b-224", "2b-448", "2b-896", "9b-224", "9b-448", "9b-896", "27b-224", "27b-448", "27b-896"] VARIANT_CONFIGS = { "2b": { "num_positions": 256, "hidden_size": 2304, "num_hidden_layers": 26, "intermediate_size": 9216, "num_key_value_heads": 4, "num_attention_heads": 8, "head_dim": 256, "query_pre_attn_scalar": 256, }, "9b": { "num_positions": 1024, "hidden_size": 3584, "num_hidden_layers": 42, "intermediate_size": 14336, "num_key_value_heads": 8, "num_attention_heads": 16, "head_dim": 256, "query_pre_attn_scalar": 256, }, "27b": { "num_positions": 4096, "hidden_size": 4608, "num_hidden_layers": 46, "intermediate_size": 36864, "num_key_value_heads": 16, "num_attention_heads": 32, "head_dim": 128, "query_pre_attn_scalar": 4608 // 32, # scaling is different for the 28b }, } DTYPES = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16} def get_paligemma2_config(variant: str, precision: str): config = { "image_token_id": None, "pad_token_id": 0, "bos_token_id": 2, "eos_token_id": 1, } base_variant = variant.split("-")[0] if variant in PALIGEMMA2_VARIANTS: image_size = int(variant.split("-")[1]) variant_config = VARIANT_CONFIGS[base_variant] patch_size = 14 num_image_tokens = (image_size**2) // (patch_size**2) config["projection_dim"] = variant_config["hidden_size"] config["image_token_id"] = 257152 config["num_hidden_layers"] = variant_config["num_hidden_layers"] # For generate text_config = Gemma2Config.from_pretrained("google/gemma-2-2b-it").to_dict() sup_text_config = { "model_type": "gemma2", "vocab_size": 257152, "num_hidden_layers": variant_config["num_hidden_layers"], "num_key_value_heads": variant_config["num_key_value_heads"], "head_dim": variant_config["head_dim"], "dtype": precision, "hidden_size": variant_config["hidden_size"], "hidden_activation": "gelu_pytorch_tanh", "num_attention_heads": variant_config["num_attention_heads"], "intermediate_size": variant_config["intermediate_size"], "is_encoder_decoder": False, "query_pre_attn_scalar": variant_config["query_pre_attn_scalar"], } text_config.update(sup_text_config) vision_config = { "num_positions": variant_config["num_positions"], # not useful, to remove "dtype": precision, "image_size": image_size, "patch_size": patch_size, "num_image_tokens": num_image_tokens, "hidden_size": 1152, "intermediate_size": 4304, "num_hidden_layers": 27, 
"num_attention_heads": 16, "projection_dim": variant_config["hidden_size"], "hidden_act": "gelu_pytorch_tanh", "vision_use_head": False, } final_config = PaliGemmaConfig(text_config=text_config, vision_config=vision_config, **config) else: raise ValueError(f"Identifier {variant} not supported. Available: {PALIGEMMA2_VARIANTS}") return final_config def slice_state_dict(state_dict, config): # fmt: off # patch embeddings state_dict["vision_tower.vision_model.embeddings.patch_embedding.weight"] = state_dict.pop("img/embedding/kernel").transpose( 3, 2, 0, 1 ) state_dict["vision_tower.vision_model.embeddings.patch_embedding.bias"] = state_dict.pop("img/embedding/bias") # positional embeddings state_dict["vision_tower.vision_model.embeddings.position_embedding.weight"] = state_dict.pop("img/pos_embedding").reshape( -1, config.vision_config.hidden_size ) # extract vision layers to be sliced at index 0. There are 27 layers in the base model. encoderblock_layernorm0_scale = state_dict.pop("img/Transformer/encoderblock/LayerNorm_0/scale") encoderblock_layernorm0_bias = state_dict.pop("img/Transformer/encoderblock/LayerNorm_0/bias") encoderblock_layernorm1_scale = state_dict.pop("img/Transformer/encoderblock/LayerNorm_1/scale") encoderblock_layernorm1_bias = state_dict.pop("img/Transformer/encoderblock/LayerNorm_1/bias") encoderblock_mlp_dense0_kernel= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_0/kernel") encoderblock_mlp_dense0_bias= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_0/bias") encoderblock_mlp_dense1_kernel= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_1/kernel") encoderblock_mlp_dense1_bias= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_1/bias") encoderblock_attention_0_key_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/kernel") encoderblock_attention_0_key_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/bias") encoderblock_attention_0_value_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/kernel") encoderblock_attention_0_value_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/bias") encoderblock_attention_0_query_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/kernel") encoderblock_attention_0_query_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/bias") encoderblock_attention_0_out_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/kernel") encoderblock_attention_0_out_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/bias") for i in range(config.vision_config.num_hidden_layers): state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm1.weight"] = encoderblock_layernorm0_scale[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm1.bias"] = encoderblock_layernorm0_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm2.weight"] = encoderblock_layernorm1_scale[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm2.bias"] = encoderblock_layernorm1_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.weight"] = encoderblock_mlp_dense0_kernel[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.bias"] = encoderblock_mlp_dense0_bias[i] 
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.weight"] = encoderblock_mlp_dense1_kernel[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.bias"] = encoderblock_mlp_dense1_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = encoderblock_attention_0_key_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = encoderblock_attention_0_key_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = encoderblock_attention_0_value_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = encoderblock_attention_0_value_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = encoderblock_attention_0_query_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = encoderblock_attention_0_query_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"] = encoderblock_attention_0_out_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"] = encoderblock_attention_0_out_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict["vision_tower.vision_model.post_layernorm.weight"] = state_dict.pop("img/Transformer/encoder_norm/scale").transpose() state_dict["vision_tower.vision_model.post_layernorm.bias"] = state_dict.pop("img/Transformer/encoder_norm/bias") # multimodal projector state_dict['multi_modal_projector.linear.weight'] = state_dict.pop("img/head/kernel").transpose() state_dict['multi_modal_projector.linear.bias'] = state_dict.pop("img/head/bias") # text decoder (gemma) embedding_vector = state_dict.pop("llm/embedder/input_embedding") state_dict["language_model.model.embed_tokens.weight"] = embedding_vector # pop the einsum attention + mlp representations. There are 26 layers in gemma2-2b. 
llm_attention_attn_vec_einsum = state_dict.pop("llm/layers/attn/attn_vec_einsum/w") # (26, 2, 4, 2304, 256) for 2b-224, 4 kv heads and 26 layers llm_attention_kv_einsum = state_dict.pop("llm/layers/attn/kv_einsum/w") llm_attention_q_einsum = state_dict.pop("llm/layers/attn/q_einsum/w") llm_mlp_gating_einsum = state_dict.pop("llm/layers/mlp/gating_einsum") llm_mlp_linear = state_dict.pop("llm/layers/mlp/linear") # TODO verify correctness of layer norm loading llm_input_layernorm = state_dict.pop("llm/layers/pre_attention_norm/scale") llm_pre_feedforward_layernorm = state_dict.pop("llm/layers/pre_ffw_norm/scale") llm_post_attention_layernorm = state_dict.pop("llm/layers/post_attention_norm/scale") llm_post_feedforward_layernorm = state_dict.pop("llm/layers/post_ffw_norm/scale") for i in range(config.text_config.num_hidden_layers): # llm_attention_q_einsum[i].shape = (8, 2048, 256) # q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size) """ q shape (8, 2304, 256) k shape (4, 2304, 256) v shape (4, 2304, 256) o shape (8, 256, 2304) """ q_transpose = (0, 2, 1) k_transpose = (0, 2, 1) v_transpose = (0, 2, 1) o_transpose = (2, 0, 1) q_weight_matrices = llm_attention_q_einsum[i].transpose(*q_transpose) q_proj_weight_reshaped = q_weight_matrices q_proj_weight_reshaped = q_proj_weight_reshaped.reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size) state_dict[f"language_model.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped # Shape: (4, 2304, 256) k_weight_matrices = llm_attention_kv_einsum[i, 0].transpose(*k_transpose) k_proj_weight_reshaped = k_weight_matrices.reshape( config.text_config.num_key_value_heads * config.text_config.head_dim, config.text_config.hidden_size ) state_dict[f"language_model.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped # llm_attention_kv_einsum[i, 1].shape = (num_key_value_heads, hidden_size, head_dim) v_weight_matrices = llm_attention_kv_einsum[i, 1].transpose(*v_transpose) # Shape: (4, 2304, 256) v_proj_weight_reshaped = v_weight_matrices.reshape( config.text_config.num_key_value_heads * config.text_config.head_dim, config.text_config.hidden_size ) state_dict[f"language_model.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped # output projection. 
# llm_attention_attn_vec_einsum[i].shape = (8, 256, 2304) o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].transpose(*o_transpose).reshape(config.text_config.hidden_size, config.text_config.num_attention_heads * config.text_config.head_dim) state_dict[f"language_model.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped # mlp layers gate_proj_weight = llm_mlp_gating_einsum[i, 0] state_dict[f"language_model.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose() up_proj_weight = llm_mlp_gating_einsum[i, 1] state_dict[f"language_model.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose() state_dict[f"language_model.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose() state_dict[f"language_model.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i] state_dict[f"language_model.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i] state_dict[f"language_model.model.layers.{i}.pre_feedforward_layernorm.weight"] = llm_pre_feedforward_layernorm[i] state_dict[f"language_model.model.layers.{i}.post_feedforward_layernorm.weight"] = llm_post_feedforward_layernorm[i] state_dict["language_model.model.norm.weight"] = state_dict.pop("llm/final_norm/scale") state_dict["language_model.lm_head.weight"] = embedding_vector # weights are tied. [k for k in state_dict if not k.startswith('vision') and not k.startswith('language')] # fmt: on for key, value in state_dict.items(): if not isinstance(value, torch.Tensor): try: if value.dtype == jnp.bfloat16: value = jnp.array(value).astype(jnp.float32) value = np.array(value) state_dict[key] = torch.from_numpy(value).to(torch.bfloat16) else: state_dict[key] = torch.from_numpy(value) except Exception as initial_exception: raise ValueError(f"Conversion failed from jax weights with {initial_exception}. Check your inputs.") return state_dict def flatten_nested_dict(params, parent_key="", sep="/", precision: int = "float32"): items = [] for k, v in params.items(): k = k.removeprefix("params/") new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_nested_dict(v, parent_key=new_key, sep=sep, precision=precision).items()) else: if precision == "bfloat16": try: v = v.view(ml_dtypes.bfloat16) except Exception as initial_exception: raise ValueError(f"Conversion failed from bfloat16 with {initial_exception}, check your inputs.") items.append((new_key, v)) return dict(items) @torch.no_grad() def convert_paligemma2_checkpoint( checkpoint_path, pytorch_dump_folder_path, variant: str, precision: str, do_convert_weights=False, ): """ Read checkpoints from flax npz files, rename/reshape, send result to state dict and verify logits if needed. """ config = get_paligemma2_config(variant, precision=precision) if do_convert_weights: tokenizer_id = "google/paligemma-3b-pt-224" # same tokenizer as paligemma 1 tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) image_token = AddedToken("<image>", normalized=False, special=True) tokens_to_add = {"additional_special_tokens": [image_token]} tokenizer.add_special_tokens(tokens_to_add) # tokenizer.padding_side = 'right' # uncomment for testing purposes only. 
image_processor = SiglipImageProcessor.from_pretrained("google/paligemma-3b-pt-224") image_processor.size = {"width": config.vision_config.image_size, "height": config.vision_config.image_size} image_processor.image_seq_length = config.vision_config.num_image_tokens processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer) data = jnp.load(checkpoint_path) state_dict = flatten_nested_dict(data, precision=precision) del data state_dict_transformers = slice_state_dict(state_dict, config) del state_dict del config.hidden_size # this key is unused model = PaliGemmaForConditionalGeneration(config).to(device).eval() model.load_state_dict(state_dict_transformers) del state_dict_transformers model.config.text_config._attn_implementation = "sdpa" # model expansion to get random embeds of image tokens pad_shape = 64 # for performance reasons pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data mu = torch.mean(pre_expansion_embeddings, dim=0).float() n = pre_expansion_embeddings.size()[0] sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma) # We add an image token so we resize the model model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape) model.language_model.model.embed_tokens.weight.data[257152:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[257152:].shape[0])), dim=0, ) model.language_model.lm_head.weight.data[257152:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[257152:].shape[0])), dim=0, ) # convert to needed precision model.to(DTYPES[precision]) model.save_pretrained(pytorch_dump_folder_path, safe_serialization=True) processor.save_pretrained(pytorch_dump_folder_path) else: processor = PaliGemmaProcessor.from_pretrained(pytorch_dump_folder_path, do_rescale=False) model = ( PaliGemmaForConditionalGeneration.from_pretrained(pytorch_dump_folder_path, attn_implementation="sdpa") .to(device) .eval() ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the .npz checkpoint", ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output directory where model and processor will be saved.", ) parser.add_argument( "--precision", choices=["float32", "bfloat16", "float16"], type=str, help="Precision identifier for model conversion - should match the base checkpoint precision.", ) parser.add_argument( "--variant", default="2b-224", choices=PALIGEMMA2_VARIANTS, type=str, help="String identifier of the paligemma2 variant to convert.", ) parser.add_argument( "--do_convert_weights", action="store_true", help="Whether or not to reload and convert the weights." ) args = parser.parse_args() convert_paligemma2_checkpoint( checkpoint_path=args.checkpoint_path, pytorch_dump_folder_path=args.pytorch_dump_folder_path, variant=args.variant, precision=args.precision, do_convert_weights=args.do_convert_weights, )
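For quick reference, the converter above can also be driven programmatically rather than through the CLI. The sketch below is not part of the original script: the checkpoint and output paths are hypothetical placeholders, and it assumes this file is importable as a module on the Python path.

```python
# A minimal sketch of calling the converter directly (hypothetical paths).
from convert_paligemma2_weights_to_hf import convert_paligemma2_checkpoint

convert_paligemma2_checkpoint(
    checkpoint_path="./paligemma2_2b_224.npz",        # placeholder flax .npz checkpoint
    pytorch_dump_folder_path="./paligemma2-2b-224-hf",
    variant="2b-224",                                 # must be a member of PALIGEMMA2_VARIANTS
    precision="bfloat16",                             # should match the base checkpoint precision
    do_convert_weights=True,
)
```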
transformers/src/transformers/models/paligemma/convert_paligemma2_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/paligemma/convert_paligemma2_weights_to_hf.py", "repo_id": "transformers", "token_count": 8958 }
510
# coding=utf-8 # Copyright 2020 Google and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Any, Optional import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging from ...utils.import_utils import requires SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} logger = logging.get_logger(__name__) # TODO ArthurZ refactor this to only use the added_tokens_encoder @requires(backends=("sentencepiece",)) class PegasusTokenizer(PreTrainedTokenizer): r""" Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. mask_token (`str`, *optional*, defaults to `"<mask_2>"`): The token used for masking single token values. This is the token used when training this model with masked language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining. It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://huggingface.co/papers/1912.08777). mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`): The token used for masking whole target sentences. This is the token used when training this model with gap sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://huggingface.co/papers/1912.08777). additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. 
            If no additional_special_tokens are provided, <mask_2> and <unk_2, ..., unk_102> are used as additional
            special tokens corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        sp_model_kwargs: Optional[dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {list}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
) additional_special_tokens = additional_special_tokens_extended else: additional_special_tokens_extended = [] additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)] self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.mask_token_sent = mask_token_sent self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) _added_tokens_decoder = { 0: AddedToken(str(pad_token), special=True), 1: AddedToken(str(eos_token), special=True), } if self.mask_token_sent is not None: _added_tokens_decoder[2] = AddedToken(mask_token_sent, special=True) _added_tokens_decoder[3] = AddedToken(str(mask_token), special=True) for i in range(2, self.offset): _added_tokens_decoder[len(_added_tokens_decoder)] = AddedToken(f"<unk_{i}>", special=True) # Force update as we want to make sure vocab is enforced (same as fast) self._added_tokens_decoder = kwargs.pop("added_tokens_decoder", {}) self._added_tokens_decoder.update(_added_tokens_decoder) super().__init__( eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def vocab_size(self) -> int: return len(self.sp_model) + self.offset def get_vocab(self) -> dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> list[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) to an id using the vocab.""" sp_id = self.sp_model.piece_to_id(token) return sp_id + self.offset def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) to a token (str) using the vocab.""" if index < self.offset: return self.sp_model.IdToPiece(index) token = self.sp_model.IdToPiece(index - self.offset) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def num_special_tokens_to_add(self, pair=False): """Just EOS""" return 1 def _special_token_mask(self, seq): all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def get_special_tokens_mask( self, token_ids_0: list, token_ids_1: Optional[list] = None, already_has_special_tokens: bool = False ) -> list[int]: """Get list where entries are 
[1] if a token is [eos] or [pad] else 0.""" if already_has_special_tokens: return self._special_token_mask(token_ids_0) elif token_ids_1 is None: return self._special_token_mask(token_ids_0) + [1] else: return self._special_token_mask(token_ids_0 + token_ids_1) + [1] def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence: - single sequence: `X </s>` - pair of sequences: `A B </s>` (not intended use) BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) __all__ = ["PegasusTokenizer"]
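To make the `offset` bookkeeping above concrete: ids 0 and 1 of the final vocabulary are reserved for `<pad>` and `</s>`, ids 2 - 104 host the mask tokens and the `<unk_2>`...`<unk_102>` placeholders, and every raw SentencePiece id is shifted up by `offset` (103). The sketch below is a toy re-statement of that arithmetic, not part of the library, and needs no tokenizer files.

```python
# Toy illustration of the id arithmetic in _convert_token_to_id / _convert_id_to_token.
OFFSET = 103  # entries 2 - 104 of the final vocab are reserved for pretraining tokens

def sentencepiece_id_to_vocab_id(sp_id: int) -> int:
    # Every raw SentencePiece id is shifted past the reserved range.
    return sp_id + OFFSET

def vocab_id_is_reserved(index: int) -> bool:
    # ids below OFFSET are normally intercepted by the added-tokens table
    # (pad, eos, <mask_1>, <mask_2>, <unk_i>); the tokenizer's fallback decodes
    # them with the raw SentencePiece id instead.
    return index < OFFSET

assert sentencepiece_id_to_vocab_id(0) == 103  # first real piece lands after the reserved block
assert vocab_id_is_reserved(2) and not vocab_id_is_reserved(103)
```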
transformers/src/transformers/models/pegasus/tokenization_pegasus.py/0
{ "file_path": "transformers/src/transformers/models/pegasus/tokenization_pegasus.py", "repo_id": "transformers", "token_count": 5573 }
511
# Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for PerceptionLM.""" import math from functools import reduce from typing import Optional, Union import numpy as np from ...image_processing_utils import ( BatchFeature, ) from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, get_image_size, group_images_by_shape, reorder_images, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, PILImageResampling, ) from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, ) if is_torch_available(): import torch if is_torchvision_available(): from torchvision.transforms import functional as F class PerceptionLMFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): r""" vision_input_type (`str`, *optional*, defaults to `"thumb+tile"`): Vision processing strategy. `"thumb+tile"` uses both thumbnails and multiple tiles for multi-scale processing, otherwise uses single tile for lower memory usage. tile_size (`int`, *optional*, defaults to `448`): Height and width dimension (in pixels) of each tile used for image processing. max_num_tiles (`int`, *optional*, defaults to `36`): Maximum number of tiles an image can be split into based on its aspect ratio. """ vision_input_type: str = "thumb+tile" tile_size: int = 448 max_num_tiles: int = 36 @auto_docstring class PerceptionLMImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD do_resize = True do_center_crop = False do_rescale = True do_normalize = True do_convert_rgb = True size = {"width": 448, "height": 448} # for backward compatibility in tests valid_kwargs = PerceptionLMFastImageProcessorKwargs def __init__(self, **kwargs: Unpack[PerceptionLMFastImageProcessorKwargs]) -> None: super().__init__(**kwargs) @auto_docstring def preprocess(self, images, **kwargs: Unpack[PerceptionLMFastImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) @staticmethod def _factors(n: int): """Return all factors of a number.""" return set( reduce( list.__add__, ([i, n // i] for i in range(1, int(n**0.5) + 1) if n % i == 0), ) ) def _find_supported_aspect_ratios(self): """ This function computes all the allowed aspect ratios for a fixed number of input chunks. The order of returned items matters for the result of `_fit_image_to_canvas` function. If tie exists in `_fit_image_to_canvas`, the latter in `_find_supported_aspect_ratios` wins. 
        For example, with `num_tiles=5`, it will return:
        {
            0.2: [(1, 5)],
            5.0: [(5, 1)],
            0.25: [(1, 4)],
            1.0: [(2, 2), (1, 1)],
            4.0: [(4, 1)],
            0.3333333333333333: [(1, 3)],
            3.0: [(3, 1)],
            0.5: [(1, 2)],
            2.0: [(2, 1)]
        }
        """
        asp_dict = {}
        for chunk_size in range(self.max_num_tiles, 0, -1):
            _factors = sorted(self._factors(chunk_size))
            _asp_ratios = [(x, chunk_size // x) for x in _factors]
            for ratio in _asp_ratios:
                k = ratio[0] / ratio[1]
                if k not in asp_dict:
                    asp_dict[k] = [ratio]
                else:
                    asp_dict[k].append(ratio)
        return asp_dict

    def _get_image_height_width(
        self, image_width: int, image_height: int, target_width: int, target_height: int
    ) -> tuple[int, int]:
        """
        Given the image width and height and the target canvas width and height, return the dimensions the image
        would be resized to while preserving its aspect ratio.
        """
        scale = image_width / image_height

        if scale > 1.0:
            # Width is larger than height.
            # The rescaling factor is the minimum of the two scaling factors; otherwise one side would fall outside the canvas.
            rescaling_factor = min(target_width / image_width, target_height / image_height)

            # Set the new width to the target width and the height to the rescaled height.
            new_w = rescaling_factor * image_width
            new_h = math.floor(new_w / scale)
        else:
            # Height is larger than width.
            # The rescaling factor is the minimum of the two scaling factors; otherwise one side would fall outside the canvas.
            rescaling_factor = min(target_width / image_width, target_height / image_height)

            # Set the new height to the target height and the width to the rescaled width.
            new_h = rescaling_factor * image_height
            new_w = math.floor(new_h * scale)

        return new_w, new_h

    def _fit_image_to_canvas(self, img_width: int, img_height: int, tile_size: int):
        """
        Given an image width, height and a target number of chunks, this function will see if the image can fit into
        any of the canvases that can be built by arranging the tiles in a grid. If the image can fit onto several
        canvases, it will return the canvas where the shorter edge of the image will be largest.
        """
        # Initialize the optimal canvas to None. If no canvas is found where the image fits, the function returns None.
        optimal_canvas = None
        optimal_image_width_height = None

        scale = img_width / img_height

        # Gather all potentially supported image resolutions and iterate through them to find the best match.
        potential_arrangements = [
            item for sublist in self._find_supported_aspect_ratios().values() for item in sublist
        ]
        for n_w, n_h in potential_arrangements:
            # Compute the canvas size
            canvas_width, canvas_height = n_w * tile_size, n_h * tile_size

            # Check if the image can fit into the canvas without downsampling
            if canvas_width >= img_width and canvas_height >= img_height:
                # If we did not find a good canvas yet, we will use the current one
                if optimal_canvas is None:
                    # Set the optimal canvas and determine the actual image height and width in the canvas with aspect-ratio-preserving resampling.
                    optimal_canvas = (n_w, n_h)
                    optimal_image_width_height = self._get_image_height_width(
                        image_width=img_width,
                        image_height=img_height,
                        target_width=n_w * tile_size,
                        target_height=n_h * tile_size,
                    )
                else:
                    # If we already found an optimal canvas before, check whether the shorter edge of the image would be larger than with the current optimal canvas.
                    # This means we can potentially upsample the image resolution, which is beneficial to performance.
                    image_width_height = self._get_image_height_width(
                        image_width=img_width,
                        image_height=img_height,
                        target_width=n_w * tile_size,
                        target_height=n_h * tile_size,
                    )
                    # Llama3V dynamic tiling. Prioritize the biggest canvas.
                    if (scale < 1.0 and (image_width_height[0] >= optimal_image_width_height[0])) or (
                        scale >= 1.0 and (image_width_height[1] >= optimal_image_width_height[1])
                    ):
                        optimal_canvas = (n_w, n_h)
                        optimal_image_width_height = image_width_height
        return optimal_canvas

    def _find_closest_aspect_ratio(self, img_width: int, img_height: int, tile_size: int) -> tuple:
        """
        Given an image width, height and a target number of chunks, this function will find the closest supported
        aspect ratio.
        """
        target_aspect_ratio = img_width / img_height
        asp_dict = self._find_supported_aspect_ratios()
        closest_aspect_ratio = None
        if target_aspect_ratio >= 1:
            closest_aspect_ratio = min(
                [k for k in asp_dict if k <= target_aspect_ratio],
                key=lambda x: abs(x - target_aspect_ratio),
            )
            tiles_given_aspect_ratio = asp_dict[closest_aspect_ratio]
            # select largest width
            return max(tiles_given_aspect_ratio, key=lambda x: x[0])
        else:
            closest_aspect_ratio = min(
                [k for k in asp_dict if k > target_aspect_ratio],
                key=lambda x: abs(1 / x - 1 / target_aspect_ratio),
            )
            tiles_given_aspect_ratio = asp_dict[closest_aspect_ratio]
            # select largest height
            return max(tiles_given_aspect_ratio, key=lambda x: x[1])

    def _split(self, image: torch.Tensor, ncw: int, nch: int) -> torch.Tensor:
        # Split the image into the required number of tiles (ncw x nch)
        batch_size, num_channels, height, width = image.size()
        image = image.view(batch_size, num_channels, nch, height // nch, ncw, width // ncw)
        # Permute dimensions to reorder the axes
        image = image.permute(0, 2, 4, 1, 3, 5).contiguous()
        # Reshape into the desired output shape (batch_size, ncw * nch, num_channels, height // nch, width // ncw)
        image = image.view(batch_size, ncw * nch, num_channels, height // nch, width // ncw)
        return image

    def resize(
        self,
        image: np.ndarray,
        tile_size: int,
        max_num_tiles: int,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        height, width = get_image_size(image, channel_dim=input_data_format)
        if max_num_tiles > 1:
            aspect_ratio = self._fit_image_to_canvas(img_width=width, img_height=height, tile_size=tile_size)
            if aspect_ratio is None:
                # If we did not find a canvas, we have to find the closest aspect ratio and downsample the image
                aspect_ratio = self._find_closest_aspect_ratio(img_width=width, img_height=height, tile_size=tile_size)
        else:
            aspect_ratio = (1, 1)
        new_width, new_height = aspect_ratio[0] * tile_size, aspect_ratio[1] * tile_size
        image = F.resize(image, (new_height, new_width), interpolation=resample)
        return image, aspect_ratio

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        do_rescale: Optional[bool],
        rescale_factor: Optional[Union[int, float]],
        do_normalize: Optional[bool],
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        vision_input_type: str,
        tile_size: int,
        max_num_tiles: int,
        return_tensors: Optional[Union[str, TensorType]],
        disable_grouping: bool,
        **kwargs: Unpack[PerceptionLMFastImageProcessorKwargs],
    ) -> BatchFeature:
        # Group images by size for batched transformation
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                if vision_input_type == "thumb+tile":
                    thumbnails, _ = self.resize(stacked_images, tile_size, max_num_tiles=1)
                    images_for_tiling, (tiles_w, tiles_h) = self.resize(
                        stacked_images, tile_size, max_num_tiles=max_num_tiles
                    )
                    image_tiles = self._split(images_for_tiling, tiles_w,
tiles_h) stacked_images = torch.cat([thumbnails.unsqueeze(1), image_tiles], dim=1) else: # vanilla single tile for low memory devices stacked_images, _ = self.resize(stacked_images, tile_size, max_num_tiles=1) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std, ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = [p[None] if p.ndim == 3 else p for p in processed_images] # add tiles dimension if needed processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["PerceptionLMImageProcessorFast"]
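As a rough standalone check on the tiling bookkeeping above, the aspect-ratio enumeration can be reproduced without instantiating the processor. The sketch below re-implements `_factors` and the supported-aspect-ratio dictionary as free functions (names of my choosing, not library API), using `max_num_tiles=5` to match the docstring example.

```python
# Standalone sketch of the aspect-ratio enumeration used by the fast processor.
from functools import reduce


def factors(n: int) -> set[int]:
    # All divisors of n, mirroring PerceptionLMImageProcessorFast._factors.
    return set(reduce(list.__add__, ([i, n // i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))


def supported_aspect_ratios(max_num_tiles: int) -> dict[float, list[tuple[int, int]]]:
    # Enumerate (tiles_wide, tiles_high) grids keyed by their width/height ratio,
    # iterating from the largest tile count down, as the processor does.
    asp: dict[float, list[tuple[int, int]]] = {}
    for chunk_size in range(max_num_tiles, 0, -1):
        for x in sorted(factors(chunk_size)):
            ratio = (x, chunk_size // x)
            asp.setdefault(ratio[0] / ratio[1], []).append(ratio)
    return asp


print(supported_aspect_ratios(5))
# {0.2: [(1, 5)], 5.0: [(5, 1)], 0.25: [(1, 4)], 1.0: [(2, 2), (1, 1)], 4.0: [(4, 1)], ...}
```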
transformers/src/transformers/models/perception_lm/image_processing_perception_lm_fast.py/0
{ "file_path": "transformers/src/transformers/models/perception_lm/image_processing_perception_lm_fast.py", "repo_id": "transformers", "token_count": 6135 }
512
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/phi3/modular_phi3.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_phi3.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional, Union import torch from torch import nn from transformers.utils.generic import check_model_inputs from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...integrations import use_kernel_forward_from_hub from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import ( GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer, ) from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.deprecation import deprecate_kwarg from .configuration_phi3 import Phi3Config class Phi3MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False) self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) self.activation_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: up_states = self.gate_up_proj(hidden_states) gate, up_states = up_states.chunk(2, dim=-1) up_states = up_states * self.activation_fn(gate) return self.down_proj(up_states) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1) k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1) return q_embed, k_embed class Phi3Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.num_key_value_heads = config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) qkv = self.qkv_proj(hidden_states) query_pos = self.config.num_attention_heads * self.head_dim query_states = qkv[..., :query_pos] key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim] value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :] query_states = query_states.view(hidden_shape).transpose(1, 2) key_states = key_states.view(hidden_shape).transpose(1, 2) value_states = value_states.view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, "sliding_window", None), **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights @use_kernel_forward_from_hub("RMSNorm") class Phi3RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Phi3RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, 
hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class Phi3DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Phi3Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx) self.mlp = Phi3MLP(config) self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.config = config self.resid_attn_dropout = nn.Dropout(config.resid_pdrop) self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama return hidden_states @auto_docstring class Phi3PreTrainedModel(PreTrainedModel): config: Phi3Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Phi3DecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": Phi3DecoderLayer, "attentions": Phi3Attention, } _version = "0.0.5" class Phi3RotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Phi3Config, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) @auto_docstring class Phi3Model(Phi3PreTrainedModel): def __init__(self, config: Phi3Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Phi3RotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask causal_mask = mask_function( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) @auto_docstring class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.model = Phi3Model(config) self.vocab_size = config.vocab_size self.lm_head = 
nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, Phi3ForCausalLM

        >>> model = Phi3ForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
        # process

        # When the input length first crosses the long/short factor switching point, force the cache to be recomputed.
        # This makes that single token position slower, but it is better than the current failure.
if ( past_key_values and self.config.rope_scaling and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1 ): past_length = cache_position[0] if past_length <= self.config.original_max_position_embeddings: past_key_values = None model_inputs = super().prepare_inputs_for_generation( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, logits_to_keep=logits_to_keep, **kwargs, ) return model_inputs class Phi3ForSequenceClassification(GenericForSequenceClassification, Phi3PreTrainedModel): pass class Phi3ForTokenClassification(GenericForTokenClassification, Phi3PreTrainedModel): pass __all__ = [ "Phi3PreTrainedModel", "Phi3Model", "Phi3ForCausalLM", "Phi3ForSequenceClassification", "Phi3ForTokenClassification", ]
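To make the grouped-query attention plumbing above easier to follow, the shapes flowing through `repeat_kv` and the fused `qkv_proj` split can be exercised on random tensors. The sizes below (32 heads, 8 KV heads, head_dim 96) are illustrative only and are not tied to any particular Phi-3 checkpoint.

```python
# Shape sketch for the GQA path (illustrative sizes, not a real checkpoint).
import torch

batch, seq, num_heads, num_kv_heads, head_dim = 2, 5, 32, 8, 96

key = torch.randn(batch, num_kv_heads, seq, head_dim)
n_rep = num_heads // num_kv_heads  # num_key_value_groups == 4 here

# repeat_kv expands (batch, kv_heads, seq, dim) -> (batch, heads, seq, dim)
expanded = key[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq, head_dim)
expanded = expanded.reshape(batch, num_kv_heads * n_rep, seq, head_dim)
assert expanded.shape == (batch, num_heads, seq, head_dim)

# The fused qkv projection output is later split at these offsets:
query_pos = num_heads * head_dim          # end of the query slice
kv_size = num_kv_heads * head_dim         # width of each of the k and v slices
print(query_pos, query_pos + kv_size)     # slice boundaries inside the qkv tensor
```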
transformers/src/transformers/models/phi3/modeling_phi3.py/0
{ "file_path": "transformers/src/transformers/models/phi3/modeling_phi3.py", "repo_id": "transformers", "token_count": 10082 }
513
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pix2Struct model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Pix2StructTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate a Pix2Struct text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct text decoder used by the [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50244): Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pix2StructTextModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Dimensionality of the key, query, value projections in each attention head. d_ff (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string). decoder_start_token_id (`int`, *optional*, defaults to 0): The id of the `decoder_start_token_id` token. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). pad_token_id (`int`, *optional*, defaults to 0): The id of the `padding` token. eos_token_id (`int`, *optional*, defaults to 1): The id of the `end-of-sequence` token. 
    Example:

    ```python
    >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel

    >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructTextConfig()

    >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
        "decoder_attention_heads": "num_heads",
        "encoder_attention_heads": "num_heads",
        "encoder_layers": "num_layers",
        "decoder_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )


class Pix2StructVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to
    instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct-base
    [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        patch_embed_hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the input patch_embedding layer in the Transformer encoder.
        d_ff (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        d_kv (`int`, *optional*, defaults to 64):
            Dimensionality of the key, query, value projections per attention head.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        seq_len (`int`, *optional*, defaults to 4096):
            Maximum sequence length (here number of patches) supported by the model.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance (in tokens) to use for each attention layer.

    Example:

    ```python
    >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel

    >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructVisionConfig()

    >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv


class Pix2StructConfig(PretrainedConfig):
    r"""
    [`Pix2StructConfig`] is the configuration class to store the configuration of a
    [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified
    arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will
    yield a similar configuration to that of the Pix2Struct-base
    [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Pix2StructTextConfig`].
vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`]. initializer_factor (`float`, *optional*, defaults to 1.0): Factor to multiply the initialization range with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. is_vqa (`bool`, *optional*, defaults to `False`): Whether the model has been fine-tuned for VQA or not. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructConfig() >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration >>> config_text = Pix2StructTextConfig() >>> config_vision = Pix2StructVisionConfig() >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = "pix2struct" sub_configs = {"text_config": Pix2StructTextConfig, "vision_config": Pix2StructVisionConfig} def __init__( self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ): super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs) if text_config is None: text_config = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.") if vision_config is None: vision_config = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.") text_config["is_encoder_decoder"] = is_encoder_decoder text_config["tie_word_embeddings"] = tie_word_embeddings self.text_config = Pix2StructTextConfig(**text_config) self.vision_config = Pix2StructVisionConfig(**vision_config) self.decoder_start_token_id = self.text_config.decoder_start_token_id self.pad_token_id = self.text_config.pad_token_id self.eos_token_id = self.text_config.eos_token_id self.initializer_factor = initializer_factor self.initializer_range = initializer_range self.text_config.initializer_range = self.initializer_range self.vision_config.initializer_range = self.initializer_range self.is_vqa = is_vqa __all__ = ["Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig"]
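As a quick illustration of how the composite config propagates shared fields, the snippet below uses only defaults (no checkpoint needed) and checks that `initializer_range` is mirrored into both sub-configs and that the special token ids are lifted from the text config, exactly as the `__init__` above assigns them.

```python
# Sketch: shared fields flow from the composite config into its sub-configs.
from transformers import Pix2StructConfig

config = Pix2StructConfig(initializer_range=0.05)

# initializer_range is copied into both sub-configs by __init__:
assert config.text_config.initializer_range == 0.05
assert config.vision_config.initializer_range == 0.05

# pad/eos/decoder_start ids are lifted from the text config:
assert config.pad_token_id == config.text_config.pad_token_id == 0
assert config.eos_token_id == config.text_config.eos_token_id == 1
assert config.decoder_start_token_id == config.text_config.decoder_start_token_id == 0
```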
transformers/src/transformers/models/pix2struct/configuration_pix2struct.py/0
{ "file_path": "transformers/src/transformers/models/pix2struct/configuration_pix2struct.py", "repo_id": "transformers", "token_count": 5767 }
514
# coding=utf-8 # Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch PLBART model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...cache_utils import Cache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, ) from ...modeling_outputs import ( BaseModelOutput, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, is_torch_flex_attn_available from ..bart.modeling_bart import ( BartClassificationHead, BartDecoder, BartEncoder, BartForCausalLM, BartScaledWordEmbedding, ) from ..bigbird_pegasus.modeling_bigbird_pegasus import BigBirdPegasusForSequenceClassification from ..mbart.modeling_mbart import shift_tokens_right from .configuration_plbart import PLBartConfig if is_torch_flex_attn_available(): from ...integrations.flex_attention import BlockMask, make_flex_block_causal_mask class PLBartScaledWordEmbedding(BartScaledWordEmbedding): pass @auto_docstring class PLBartPreTrainedModel(PreTrainedModel): config: PLBartConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["PLBartDecoderLayer", "PLBartEncoderLayer"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_full_mask def _update_full_mask( self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor, ): if attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & head_mask can not be supported when using SDPA, fall back to # the manual implementation that requires a 4D causal mask in all cases. 
                # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
            elif self.config._attn_implementation == "flex_attention":
                if isinstance(attention_mask, torch.Tensor):
                    attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
            else:
                # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
                attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        return attention_mask

    # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: Optional[Union[torch.Tensor, "BlockMask"]],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
    ):
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            # Other attention flavors support in-built causal (when `mask is None`)
            # while we need to create our specific block mask regardless
            elif attention_mask is None:
                # The all-ones mask must be built on `input_tensor.device`; `attention_mask` is None in this branch.
                attention_mask = make_flex_block_causal_mask(
                    torch.ones(
                        size=(input_tensor.shape[0], input_tensor.shape[1]),
                        device=input_tensor.device,
                    )
                )
            return attention_mask

        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask

    # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_cross_attn_mask
    def _update_cross_attn_mask(
        self,
        encoder_hidden_states: Union[torch.Tensor, None],
        encoder_attention_mask: Union[torch.Tensor, None],
        input_shape: torch.Size,
        inputs_embeds: torch.Tensor,
    ):
        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            if self.config._attn_implementation == "flash_attention_2":
                encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
            elif self.config._attn_implementation == "sdpa":
                # output_attentions=True & cross_attn_head_mask cannot be supported when using SDPA, and we fall back on
                # the manual implementation that requires a 4D causal mask in all cases.
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], ) elif self.config._attn_implementation == "flex_attention": if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask( encoder_attention_mask, query_length=input_shape[-1], is_causal=False, ) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) return encoder_attention_mask class PLBartEncoder(BartEncoder): pass class PLBartDecoder(BartDecoder): pass @auto_docstring class PLBartModel(PLBartPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: PLBartConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = PLBartScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = PLBartEncoder(config, self.shared) self.decoder = PLBartDecoder(config, self.shared) self.init_weights() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.LongTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. 
            If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to
            the right for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # different to other models, PLBart automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.
    """
)
class PLBartForConditionalGeneration(PLBartPreTrainedModel, GenerationMixin):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = ["final_logits_bias"]
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]

    def __init__(self, config: PLBartConfig):
        super().__init__(config)
        self.model = PLBartModel(config)
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)

        self.init_weights()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def resize_token_embeddings(
        self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
    ) -> nn.Embedding:
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
        self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.LongTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[list[torch.FloatTensor]] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using
            [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
            varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the
            right for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example Mask-filling:

        ```python
        >>> from transformers import AutoTokenizer, PLBartForConditionalGeneration

        >>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")

        >>> # en_XX is the language symbol id <LID> for English
        >>> TXT = "<s> Is 0 the <mask> Fibonacci number ? </s> en_XX"
        >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids

        >>> logits = model(input_ids).logits
        >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
        >>> probs = logits[0, masked_index].softmax(dim=0)
        >>> values, predictions = probs.topk(5)

        >>> tokenizer.decode(predictions).split()
        ['first', 'same', 'highest', 'result', 'number']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        lm_logits = self.lm_head(outputs[0])
        lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return shift_tokens_right(labels, self.config.pad_token_id)


class PLBartClassificationHead(BartClassificationHead):
    pass


class PLBartForSequenceClassification(BigBirdPegasusForSequenceClassification):
    def forward(**super_kwargs):
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
            See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
            varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the
            right for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        super().forward(**super_kwargs)


class PLBartForCausalLM(BartForCausalLM):
    @auto_docstring
    def forward(**super_kwargs):
        r"""
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PLBartForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
        >>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "PLBartForCausalLM",
    "PLBartForConditionalGeneration",
    "PLBartForSequenceClassification",
    "PLBartModel",
    "PLBartPreTrainedModel",
]
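

# ---------------------------------------------------------------------------
# Editor's note: a hedged, self-contained sketch (not part of the upstream
# file) of what `_prepare_4d_causal_attention_mask_with_cache_position` does
# with toy shapes. All sizes below are arbitrary assumptions for illustration.
if __name__ == "__main__":
    batch_size, seq_len, target_len = 2, 3, 5
    # 2D padding mask over the keys: sample 1 has its first key padded out.
    pad_mask = torch.ones(batch_size, target_len, dtype=torch.long)
    pad_mask[1, 0] = 0

    mask = PLBartPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask=pad_mask,
        sequence_length=seq_len,
        target_length=target_len,
        dtype=torch.float32,
        cache_position=torch.arange(seq_len),
        batch_size=batch_size,
    )

    # The 2D mask is expanded to (batch, 1, query_len, key_len); allowed
    # positions hold 0.0 and masked positions hold the dtype minimum.
    min_value = torch.finfo(torch.float32).min
    assert mask.shape == (batch_size, 1, seq_len, target_len)
    assert mask[0, 0, 0, 0] == 0.0  # query 0 may attend to key 0
    assert mask[0, 0, 0, 1] == min_value  # future key is causally masked
    assert mask[1, 0, 0, 0] == min_value  # padded key is masked for sample 1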
transformers/src/transformers/models/plbart/modular_plbart.py/0
{ "file_path": "transformers/src/transformers/models/plbart/modular_plbart.py", "repo_id": "transformers", "token_count": 13536 }
515
# coding=utf-8 # Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch PVT model.""" import collections import math from collections.abc import Iterable from typing import Optional, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, logging from .configuration_pvt import PvtConfig logger = logging.get_logger(__name__) # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Pvt class PvtDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class PvtPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__( self, config: PvtConfig, image_size: Union[int, Iterable[int]], patch_size: Union[int, Iterable[int]], stride: int, num_channels: int, hidden_size: int, cls_token: bool = False, ): super().__init__() self.config = config image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.position_embeddings = nn.Parameter( torch.randn(1, num_patches + 1 if cls_token else num_patches, hidden_size) ) self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size)) if cls_token else None self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=stride, stride=patch_size) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = height * width # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == self.config.image_size * self.config.image_size: return self.position_embeddings embeddings = embeddings.reshape(1, height, width, -1).permute(0, 3, 1, 2) interpolated_embeddings = F.interpolate(embeddings, size=(height, width), mode="bilinear") interpolated_embeddings = interpolated_embeddings.reshape(1, -1, height * width).permute(0, 2, 1) return interpolated_embeddings def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, int, int]: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) patch_embed = self.projection(pixel_values) *_, height, width = patch_embed.shape patch_embed = patch_embed.flatten(2).transpose(1, 2) embeddings = self.layer_norm(patch_embed) if self.cls_token is not None: cls_token = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_token, embeddings), dim=1) position_embeddings = self.interpolate_pos_encoding(self.position_embeddings[:, 1:], height, width) position_embeddings = torch.cat((self.position_embeddings[:, :1], position_embeddings), dim=1) else: position_embeddings = self.interpolate_pos_encoding(self.position_embeddings, height, width) embeddings = self.dropout(embeddings + position_embeddings) return embeddings, height, width class PvtSelfOutput(nn.Module): def __init__(self, config: PvtConfig, hidden_size: int): super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class PvtEfficientSelfAttention(nn.Module): """Efficient self-attention mechanism with reduction of the sequence [PvT paper](https://huggingface.co/papers/2102.12122).""" def __init__( self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float ): super().__init__() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.sequences_reduction_ratio = sequences_reduction_ratio if sequences_reduction_ratio > 1: self.sequence_reduction = nn.Conv2d( hidden_size, hidden_size, kernel_size=sequences_reduction_ratio, stride=sequences_reduction_ratio ) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def transpose_for_scores(self, hidden_states: int) -> torch.Tensor: new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False, ) -> tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sequences_reduction_ratio > 1: batch_size, seq_len, num_channels = hidden_states.shape # Reshape to (batch_size, num_channels, height, width) hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Apply sequence reduction hidden_states = self.sequence_reduction(hidden_states) # Reshape back to (batch_size, seq_len, num_channels) hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between 
"query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class PvtAttention(nn.Module): def __init__( self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float ): super().__init__() self.self = PvtEfficientSelfAttention( config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequences_reduction_ratio=sequences_reduction_ratio, ) self.output = PvtSelfOutput(config, hidden_size=hidden_size) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False ) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class PvtFFN(nn.Module): def __init__( self, config: PvtConfig, in_features: int, hidden_features: Optional[int] = None, out_features: Optional[int] = None, ): super().__init__() out_features = out_features if out_features is not None else in_features self.dense1 = nn.Linear(in_features, hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class PvtLayer(nn.Module): def __init__( self, config: PvtConfig, hidden_size: int, num_attention_heads: int, drop_path: float, sequences_reduction_ratio: float, mlp_ratio: float, ): super().__init__() self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attention = PvtAttention( 
config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequences_reduction_ratio=sequences_reduction_ratio, ) self.drop_path = PvtDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = PvtFFN(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False): self_attention_outputs = self.attention( hidden_states=self.layer_norm_1(hidden_states), height=height, width=width, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states)) mlp_output = self.drop_path(mlp_output) layer_output = hidden_states + mlp_output outputs = (layer_output,) + outputs return outputs class PvtEncoder(nn.Module): def __init__(self, config: PvtConfig): super().__init__() self.config = config # stochastic depth decay rule drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu").tolist() # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( PvtPatchEmbeddings( config=config, image_size=config.image_size if i == 0 else self.config.image_size // (2 ** (i + 1)), patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], cls_token=i == config.num_encoder_blocks - 1, ) ) self.patch_embeddings = nn.ModuleList(embeddings) # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( PvtLayer( config=config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequences_reduction_ratio=config.sequence_reduction_ratios[i], mlp_ratio=config.mlp_ratios[i], ) ) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) # Layer norms self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] num_blocks = len(self.block) hidden_states = pixel_values for idx, (embedding_layer, block_layer) in enumerate(zip(self.patch_embeddings, self.block)): # first, obtain patch embeddings hidden_states, height, width = embedding_layer(hidden_states) # second, send embeddings through blocks for block in block_layer: layer_outputs = block(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if idx != num_blocks - 1: hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() hidden_states = self.layer_norm(hidden_states) if 
output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class PvtPreTrainedModel(PreTrainedModel): config: PvtConfig base_model_prefix = "pvt" main_input_name = "pixel_values" _no_split_modules = [] def _init_weights(self, module: nn.Module) -> None: """Initialize the weights""" std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PvtPatchEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data, mean=0.0, std=std, ) if module.cls_token is not None: module.cls_token.data = nn.init.trunc_normal_( module.cls_token.data, mean=0.0, std=std, ) @auto_docstring class PvtModel(PvtPreTrainedModel): def __init__(self, config: PvtConfig): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = PvtEncoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" Pvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""" ) class PvtForImageClassification(PvtPreTrainedModel): def __init__(self, config: PvtConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.pvt = PvtModel(config) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor], labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.pvt( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["PvtForImageClassification", "PvtModel", "PvtPreTrainedModel"]
transformers/src/transformers/models/pvt/modeling_pvt.py/0
{ "file_path": "transformers/src/transformers/models/pvt/modeling_pvt.py", "repo_id": "transformers", "token_count": 11185 }
516
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for RAG.""" import os import warnings from typing import Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig logger = logging.get_logger(__name__) class RagTokenizer: def __init__(self, question_encoder, generator): self.question_encoder = question_encoder self.generator = generator self.current_tokenizer = self.question_encoder def save_pretrained(self, save_directory): if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer") generator_path = os.path.join(save_directory, "generator_tokenizer") self.question_encoder.save_pretrained(question_encoder_path) self.generator.save_pretrained(generator_path) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer config = kwargs.pop("config", None) if config is None: config = RagConfig.from_pretrained(pretrained_model_name_or_path) question_encoder = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer" ) generator = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer" ) return cls(question_encoder=question_encoder, generator=generator) def __call__(self, *args, **kwargs): return self.current_tokenizer(*args, **kwargs) def batch_decode(self, *args, **kwargs): return self.generator.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.generator.decode(*args, **kwargs) def _switch_to_input_mode(self): self.current_tokenizer = self.question_encoder def _switch_to_target_mode(self): self.current_tokenizer = self.generator def prepare_seq2seq_batch( self, src_texts: list[str], tgt_texts: Optional[list[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: Optional[str] = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. 
See the documentation of your specific tokenizer for more " "details", FutureWarning, ) if max_length is None: max_length = self.current_tokenizer.model_max_length model_inputs = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = self.current_tokenizer.model_max_length labels = self( text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) model_inputs["labels"] = labels["input_ids"] return model_inputs __all__ = ["RagTokenizer"]
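

# ---------------------------------------------------------------------------
# Editor's note: a hedged sketch (not part of the upstream file) showing that
# `RagTokenizer` is a thin router: `__call__` goes to the current tokenizer
# (the question encoder by default), while `decode` always goes to the
# generator. The stub tokenizers below are illustrative assumptions only.
if __name__ == "__main__":

    class _StubTokenizer:
        def __init__(self, name):
            self.name = name

        def __call__(self, text, **kwargs):
            return f"{self.name} encoded: {text}"

        def decode(self, token_ids, **kwargs):
            return f"{self.name} decoded: {token_ids}"

    tokenizer = RagTokenizer(
        question_encoder=_StubTokenizer("question_encoder"),
        generator=_StubTokenizer("generator"),
    )
    assert tokenizer("who wrote hamlet?") == "question_encoder encoded: who wrote hamlet?"
    assert tokenizer.decode([0, 1, 2]) == "generator decoded: [0, 1, 2]"

    # Switching modes redirects `__call__` to the generator (target side).
    tokenizer._switch_to_target_mode()
    assert tokenizer("to be or not to be") == "generator encoded: to be or not to be"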
transformers/src/transformers/models/rag/tokenization_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/tokenization_rag.py", "repo_id": "transformers", "token_count": 1868 }
517
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RegNet model.""" import math from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_regnet import RegNetConfig logger = logging.get_logger(__name__) class RegNetConvLayer(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", ): super().__init__() self.convolution = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, ) self.normalization = nn.BatchNorm2d(out_channels) self.activation = ACT2FN[activation] if activation is not None else nn.Identity() def forward(self, hidden_state): hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state class RegNetEmbeddings(nn.Module): """ RegNet Embeddings (stem) composed of a single aggressive convolution. """ def __init__(self, config: RegNetConfig): super().__init__() self.embedder = RegNetConvLayer( config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act ) self.num_channels = config.num_channels def forward(self, pixel_values): num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) hidden_state = self.embedder(pixel_values) return hidden_state # Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut with ResNet->RegNet class RegNetShortCut(nn.Module): """ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2): super().__init__() self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False) self.normalization = nn.BatchNorm2d(out_channels) def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) return hidden_state class RegNetSELayer(nn.Module): """ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://huggingface.co/papers/1709.01507). 
""" def __init__(self, in_channels: int, reduced_channels: int): super().__init__() self.pooler = nn.AdaptiveAvgPool2d((1, 1)) self.attention = nn.Sequential( nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(), ) def forward(self, hidden_state): # b c h w -> b c 1 1 pooled = self.pooler(hidden_state) attention = self.attention(pooled) hidden_state = hidden_state * attention return hidden_state class RegNetXLayer(nn.Module): """ RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), ) self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state class RegNetYLayer(nn.Module): """ RegNet's Y layer: an X layer with Squeeze and Excitation. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), ) self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state class RegNetStage(nn.Module): """ A RegNet stage composed by stacked layers. 
""" def __init__( self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ): super().__init__() layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer self.layers = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], ) def forward(self, hidden_state): hidden_state = self.layers(hidden_state) return hidden_state class RegNetEncoder(nn.Module): def __init__(self, config: RegNetConfig): super().__init__() self.stages = nn.ModuleList([]) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) ) in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]): self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth)) def forward( self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> BaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) @auto_docstring class RegNetPreTrainedModel(PreTrainedModel): config: RegNetConfig base_model_prefix = "regnet" main_input_name = "pixel_values" _no_split_modules = ["RegNetYLayer"] # Copied from transformers.models.resnet.modeling_resnet.ResNetPreTrainedModel._init_weights def _init_weights(self, module): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") # copied from the `reset_parameters` method of `class Linear(Module)` in `torch`. 
elif isinstance(module, nn.Linear): nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(module.bias, -bound, bound) elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) @auto_docstring # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class RegNetModel(RegNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embedder = RegNetEmbeddings(config) self.encoder = RegNetEncoder(config) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @auto_docstring( custom_intro=""" RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """ ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class RegNetForImageClassification(RegNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.regnet = RegNetModel(config) # classification head self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), ) # initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) __all__ = ["RegNetForImageClassification", "RegNetModel", "RegNetPreTrainedModel"]
transformers/src/transformers/models/regnet/modeling_regnet.py/0
{ "file_path": "transformers/src/transformers/models/regnet/modeling_regnet.py", "repo_id": "transformers", "token_count": 6454 }
518
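As a quick sanity check of the RegNet file above, the sketch below builds a deliberately tiny model and runs one forward pass. It assumes `transformers` and `torch` are installed; the config values are arbitrary choices to keep the randomly initialized model small, not recommended settings:

```python
import torch
from transformers import RegNetConfig, RegNetModel

config = RegNetConfig(
    num_channels=3,
    embedding_size=8,
    hidden_sizes=[16, 32],
    depths=[1, 1],
    groups_width=8,
    layer_type="y",  # "y" adds the squeeze-and-excitation block to each layer
)
model = RegNetModel(config).eval()

pixel_values = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    outputs = model(pixel_values)

# The stem halves the resolution; each later stage halves it again, and the
# pooler reduces the feature map to 1x1.
print(outputs.last_hidden_state.shape)  # torch.Size([1, 32, 16, 16]) with the defaults above
print(outputs.pooler_output.shape)      # torch.Size([1, 32, 1, 1])
```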
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RoBERTa configuration""" from collections import OrderedDict from collections.abc import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class RobertaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RobertaModel`] or a [`TFRobertaModel`]. It is used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa [FacebookAI/roberta-base](https://huggingface.co/FacebookAI/roberta-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the RoBERTa model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. Examples: ```python >>> from transformers import RobertaConfig, RobertaModel >>> # Initializing a RoBERTa configuration >>> configuration = RobertaConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = RobertaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "roberta" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class RobertaOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] ) __all__ = ["RobertaConfig", "RobertaOnnxConfig"]
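The `inputs` property of `RobertaOnnxConfig` above switches its dynamic axes on the task name. A minimal sketch of that behavior, assuming the `from_model_config` constructor inherited from the base `OnnxConfig`:

```python
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

config = RobertaConfig()

default_cfg = RobertaOnnxConfig.from_model_config(config)
print(default_cfg.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])

mc_cfg = RobertaOnnxConfig.from_model_config(config, task="multiple-choice")
print(mc_cfg.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])
```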
transformers/src/transformers/models/roberta/configuration_roberta.py/0
{ "file_path": "transformers/src/transformers/models/roberta/configuration_roberta.py", "repo_id": "transformers", "token_count": 2740 }
519
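Since the configuration class above is plain data, a short round-trip sketch can confirm that customized values survive serialization. This relies only on the `to_dict`/`from_dict` helpers from `PretrainedConfig`:

```python
from transformers import RobertaConfig

config = RobertaConfig(num_hidden_layers=6, classifier_dropout=0.2)

as_dict = config.to_dict()
restored = RobertaConfig.from_dict(as_dict)

assert restored.num_hidden_layers == 6
assert restored.classifier_dropout == 0.2
assert restored.model_type == "roberta"
```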
# coding=utf-8
# Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoCBert."""

import collections
import itertools
import json
import os
import unicodedata
from typing import Optional, Union

from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...tokenization_utils_base import (
    ENCODE_KWARGS_DOCSTRING,
    ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
    BatchEncoding,
    EncodedInput,
    EncodedInputPair,
    PaddingStrategy,
    PreTokenizedInput,
    PreTokenizedInputPair,
    TensorType,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from ...utils import add_end_docstrings, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "word_shape_file": "word_shape.json",
    "word_pronunciation_file": "word_pronunciation.json",
}


# Copied from transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


class RoCBertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a RoCBert tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which
    contains most of the main methods. Users should refer to this superclass for more information regarding those
    methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        word_shape_file (`str`):
            File containing the word => shape info.
        word_pronunciation_file (`str`):
            File containing the word => pronunciation info.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        word_shape_file,
        word_pronunciation_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        for cur_file in [vocab_file, word_shape_file, word_pronunciation_file]:
            if cur_file is None or not os.path.isfile(cur_file):
                raise ValueError(
                    f"Can't find a vocabulary file at path '{cur_file}'. To load the vocabulary from a Google "
                    "pretrained model use `tokenizer = RoCBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                )

        self.vocab = load_vocab(vocab_file)

        with open(word_shape_file, "r", encoding="utf8") as in_file:
            self.word_shape = json.load(in_file)

        with open(word_pronunciation_file, "r", encoding="utf8") as in_file:
            self.word_pronunciation = json.load(in_file)

        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])

        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = RoCBertBasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = RoCBertWordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
    def _tokenize(self, text, split_special_tokens=False):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput,
PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) tokens_ids = self.convert_tokens_to_ids(tokens) tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) return tokens_ids, tokens_shape_ids, tokens_proun_ids elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) tokens_ids = self.convert_tokens_to_ids(tokens) tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) return tokens_ids, tokens_shape_ids, tokens_proun_ids else: tokens_ids = self.convert_tokens_to_ids(text) tokens_shape_ids = self.convert_tokens_to_shape_ids(text) tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(text) return tokens_ids, tokens_shape_ids, tokens_proun_ids elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text, [0] * len(text), [0] * len(text) # shape and proun id is pad_value else: if is_split_into_words: raise ValueError( f"Input {text} is not valid. Should be a string or a list/tuple of strings when" " `is_split_into_words=True`." ) else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of" " integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids, first_shape_ids, first_proun_ids = get_input_ids(text) if text_pair is not None: second_ids, second_shape_ids, second_proun_ids = get_input_ids(text_pair) else: second_ids, second_shape_ids, second_proun_ids = None, None, None return self.prepare_for_model( first_ids, first_shape_ids, first_proun_ids, pair_ids=second_ids, pair_shape_ids=second_shape_ids, pair_pronunciation_ids=second_proun_ids, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, ids: list[int], shape_ids: list[int], pronunciation_ids: list[int], pair_ids: Optional[list[int]] = None, pair_shape_ids: Optional[list[int]] = None, pair_pronunciation_ids: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_id` methods. shape_ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_token_to_shape_id` methods. pronunciation_ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_token_to_pronunciation_id` methods. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_id` methods. pair_shape_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_token_to_shape_id` methods. pair_pronunciation_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. 
Can be obtained from a string by chaining the `tokenize` and `convert_token_to_pronunciation_id` methods. """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Compute the total size of the returned encodings total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) shape_ids, pair_shape_ids, _ = self.truncate_sequences( shape_ids, pair_ids=pair_shape_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) pronunciation_ids, pair_pronunciation_ids, _ = self.truncate_sequences( pronunciation_ids, pair_ids=pair_pronunciation_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) input_shape_ids = self.build_inputs_with_special_tokens( shape_ids, pair_shape_ids, self.word_shape["[UNK]"], self.word_shape["[UNK]"] ) input_pronunciation_ids = self.build_inputs_with_special_tokens( pronunciation_ids, pair_pronunciation_ids, self.word_pronunciation["[UNK]"], self.word_pronunciation["[UNK]"], ) else: sequence = ids + pair_ids if pair_ids else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair_ids else []) input_shape_ids = shape_ids + pair_shape_ids if pair_shape_ids else shape_ids input_pronunciation_ids = ( pronunciation_ids + pair_pronunciation_ids if pair_pronunciation_ids else pronunciation_ids ) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["input_shape_ids"] = input_shape_ids encoded_inputs["input_pronunciation_ids"] = input_pronunciation_ids if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if 
return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference for key in ["input_shape_ids", "input_pronunciation_ids"]: if key in encoded_inputs: encoded_inputs[key] = encoded_inputs[key] + [self.pad_token_id] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] for key in ["input_shape_ids", "input_pronunciation_ids"]: if key in encoded_inputs: encoded_inputs[key] = [self.pad_token_id] * difference + encoded_inputs[key] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ 
list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair], list[EncodedInput], list[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) tokens_ids = self.convert_tokens_to_ids(tokens) tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) return tokens_ids, tokens_shape_ids, tokens_proun_ids elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) tokens_ids = self.convert_tokens_to_ids(tokens) tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) return tokens_ids, tokens_shape_ids, tokens_proun_ids else: tokens_ids = self.convert_tokens_to_ids(text) tokens_shape_ids = self.convert_tokens_to_shape_ids(text) tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(text) return tokens_ids, tokens_shape_ids, tokens_proun_ids elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text, [0] * len(text), [0] * len(text) # shape and proun id is pad_value else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) input_ids = [] input_shape_ids = [] input_pronunciation_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if not isinstance(ids_or_pair_ids, (list, tuple)): ids, pair_ids = ids_or_pair_ids, None elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids, first_shape_ids, first_proun_ids = get_input_ids(ids) if pair_ids is not None: second_ids, second_shape_ids, second_proun_ids = get_input_ids(pair_ids) else: second_ids, second_shape_ids, second_proun_ids = None, None, None input_ids.append((first_ids, second_ids)) input_shape_ids.append((first_shape_ids, second_shape_ids)) input_pronunciation_ids.append((first_proun_ids, second_proun_ids)) batch_outputs = self._batch_prepare_for_model( input_ids, batch_shape_ids_pairs=input_shape_ids, batch_pronunciation_ids_pairs=input_pronunciation_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: list[Union[PreTokenizedInputPair, tuple[list[int], None]]], batch_shape_ids_pairs: list[Union[PreTokenizedInputPair, tuple[list[int], None]]], batch_pronunciation_ids_pairs: list[Union[PreTokenizedInputPair, tuple[list[int], None]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. 
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
            batch_shape_ids_pairs: list of tokenized input shape ids or input shape ids pairs
            batch_pronunciation_ids_pairs: list of tokenized input pronunciation ids or input pronunciation ids pairs
        """

        batch_outputs = {}
        for i, (first_ids, second_ids) in enumerate(batch_ids_pairs):
            first_shape_ids, second_shape_ids = batch_shape_ids_pairs[i]
            first_pronunciation_ids, second_pronunciation_ids = batch_pronunciation_ids_pairs[i]
            outputs = self.prepare_for_model(
                first_ids,
                first_shape_ids,
                first_pronunciation_ids,
                pair_ids=second_ids,
                pair_shape_ids=second_shape_ids,
                pair_pronunciation_ids=second_pronunciation_ids,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                padding_side=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_token_to_shape_id(self, token):
        """Converts a token (str) into a shape_id using the shape vocab."""
        return self.word_shape.get(token, self.word_shape.get(self.unk_token))

    def convert_tokens_to_shape_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]:
        if tokens is None:
            return None

        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_shape_id(token))
        return ids

    def _convert_token_to_pronunciation_id(self, token):
        """Converts a token (str) into a pronunciation_id using the pronunciation vocab."""
        return self.word_pronunciation.get(token, self.word_pronunciation.get(self.unk_token))

    def convert_tokens_to_pronunciation_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]:
        if tokens is None:
            return None

        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_pronunciation_id(token))
        return ids

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def
build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, cls_token_id: Optional[int] = None, sep_token_id: Optional[int] = None, ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ cls = [self.cls_token_id] if cls_token_id is None else [cls_token_id] sep = [self.sep_token_id] if sep_token_id is None else [sep_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str, str, str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"], ) word_shape_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["word_shape_file"], ) word_pronunciation_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["word_pronunciation_file"], ) else: raise ValueError( f"Can't find a directory at path '{save_directory}'. To load the vocabulary from a Google " "pretrained model use `tokenizer = RoCBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) index = token_index writer.write(token + "\n") index += 1 with open(word_shape_file, "w", encoding="utf8") as writer: json.dump(self.word_shape, writer, ensure_ascii=False, indent=4, separators=(", ", ": ")) with open(word_pronunciation_file, "w", encoding="utf8") as writer: json.dump(self.word_pronunciation, writer, ensure_ascii=False, indent=4, separators=(", ", ": ")) return ( vocab_file, word_shape_file, word_pronunciation_file, ) # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer with BasicTokenizer->RoCBertBasicTokenizer class RoCBertBasicTokenizer: """ Constructs a RoCBertBasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer with WordpieceTokenizer->RoCBertWordpieceTokenizer class RoCBertWordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. 
Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens __all__ = ["RoCBertTokenizer"]
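To make the greedy longest-match-first loop in `RoCBertWordpieceTokenizer.tokenize` above concrete, here is a stripped-down sketch of the same algorithm with a toy vocabulary (the `max_input_chars_per_word` guard is omitted for brevity):

```python
def wordpiece(token, vocab, unk_token="[UNK]"):
    chars = list(token)
    sub_tokens, start = [], 0
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:  # shrink the window until a vocab entry matches
            substr = "".join(chars[start:end])
            if start > 0:
                substr = "##" + substr  # continuation pieces carry the ## prefix
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # no piece matched: the whole token is unknown
        sub_tokens.append(cur_substr)
        start = end
    return sub_tokens


toy_vocab = {"un", "##aff", "##able"}
print(wordpiece("unaffable", toy_vocab))  # ['un', '##aff', '##able']
print(wordpiece("xyz", toy_vocab))        # ['[UNK]']
```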
transformers/src/transformers/models/roc_bert/tokenization_roc_bert.py/0
{ "file_path": "transformers/src/transformers/models/roc_bert/tokenization_roc_bert.py", "repo_id": "transformers", "token_count": 22826 }
520
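A toy illustration of the three parallel id channels produced by the tokenizer above. The mini vocabularies here are hypothetical stand-ins for the real `vocab.txt`, `word_shape.json`, and `word_pronunciation.json` files:

```python
vocab = {"[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "你": 3, "好": 4}
word_shape = {"[UNK]": 0, "你": 11, "好": 12}
word_pronunciation = {"[UNK]": 0, "你": 21, "好": 22}

tokens = ["你", "好"]

# Mirrors the _convert_token_to_* lookups above: unknown tokens fall back to [UNK].
input_ids = [vocab.get(t, vocab["[UNK]"]) for t in tokens]
input_shape_ids = [word_shape.get(t, word_shape["[UNK]"]) for t in tokens]
input_pronunciation_ids = [word_pronunciation.get(t, word_pronunciation["[UNK]"]) for t in tokens]

# The three sequences stay aligned position by position, which is what lets
# the model fuse glyph and phonetic signals per token.
print(input_ids)                # [3, 4]
print(input_shape_ids)          # [11, 12]
print(input_pronunciation_ids)  # [21, 22]
```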
# coding=utf-8 # Copyright 2024 Baidu Inc and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RT-DETR model.""" import math import warnings from dataclasses import dataclass from functools import partial from typing import Optional, Union import torch import torch.nn.functional as F from torch import Tensor, nn from ...activations import ACT2CLS, ACT2FN from ...image_transforms import center_to_corners_format, corners_to_center_format from ...integrations import use_kernel_forward_from_hub from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import ( ModelOutput, auto_docstring, logging, torch_int, ) from ...utils.backbone_utils import load_backbone from .configuration_rt_detr import RTDetrConfig logger = logging.get_logger(__name__) # TODO: Replace all occurrences of the checkpoint with the final one @use_kernel_forward_from_hub("MultiScaleDeformableAttention") # Copied from transformers.models.deformable_detr.modeling_deformable_detr.MultiScaleDeformableAttention class MultiScaleDeformableAttention(nn.Module): def forward( self, value: Tensor, value_spatial_shapes: Tensor, value_spatial_shapes_list: list[tuple], level_start_index: Tensor, sampling_locations: Tensor, attention_weights: Tensor, im2col_step: int, ): batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes_list], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes_list): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id] .flatten(2) .transpose(1, 2) .reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False, ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) 
return output.transpose(1, 2).contiguous() @dataclass @auto_docstring( custom_intro=""" Base class for outputs of the RTDetrDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. """ ) class RTDetrDecoderOutput(ModelOutput): r""" intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`): Stacked intermediate logits (logits of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): Stacked intermediate reference points (reference points of each layer of the decoder). intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate predicted corners (predicted corners of each layer of the decoder). initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked initial reference points (initial reference points of each layer of the decoder). cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_logits: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None intermediate_predicted_corners: Optional[torch.FloatTensor] = None initial_reference_points: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Base class for outputs of the RT-DETR encoder-decoder model. """ ) class RTDetrModelOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`): Stacked intermediate logits (logits of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). 
    intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
    initial_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points used for the first decoder layer.
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
        Classification logits of the top `config.num_queries` scoring bounding boxes picked as region proposals in
        the encoder stage.
    enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`):
        Normalized coordinates (after sigmoid) of the top `config.num_queries` scoring bounding boxes picked as
        region proposals in the encoder stage.
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*):
        Classification scores of all encoder proposals, from which the top `config.num_queries` scoring bounding
        boxes are picked as region proposals in the first stage.
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
        Logits of predicted bounding boxes coordinates in the first stage.
    denoising_meta_values (`dict`):
        Extra dictionary holding the denoising-related values.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_logits: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    intermediate_predicted_corners: Optional[torch.FloatTensor] = None
    initial_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    enc_topk_logits: Optional[torch.FloatTensor] = None
    enc_topk_bboxes: Optional[torch.FloatTensor] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
    denoising_meta_values: Optional[dict] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`RTDetrForObjectDetection`].
    """
)
class RTDetrObjectDetectionOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~RTDetrImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized (absolute) bounding boxes.
    auxiliary_outputs (`list[Dict]`, *optional*):
        Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
        and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
        `pred_boxes`) for each decoder layer.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, config.num_labels)`):
        Stacked intermediate logits (logits of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
    initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked initial reference points (initial reference points of each layer of the decoder).
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*):
        Classification logits of the top `config.num_queries` scoring bounding boxes picked as region proposals in
        the encoder stage.
    enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
        Normalized coordinates (after sigmoid) of the top `config.num_queries` scoring bounding boxes picked as
        region proposals in the encoder stage.
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*):
        Classification scores of all encoder proposals, from which the top `config.num_queries` scoring bounding
        boxes are picked as region proposals in the first stage.
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
        Logits of predicted bounding boxes coordinates in the first stage.
    denoising_meta_values (`dict`):
        Extra dictionary holding the denoising-related values.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[dict] = None
    logits: Optional[torch.FloatTensor] = None
    pred_boxes: Optional[torch.FloatTensor] = None
    auxiliary_outputs: Optional[list[dict]] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_logits: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    intermediate_predicted_corners: Optional[torch.FloatTensor] = None
    initial_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    enc_topk_logits: Optional[torch.FloatTensor] = None
    enc_topk_bboxes: Optional[torch.FloatTensor] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
    denoising_meta_values: Optional[dict] = None


def _get_clones(partial_module, N):
    return nn.ModuleList([partial_module() for _ in range(N)])


# Copied from transformers.models.conditional_detr.modeling_conditional_detr.inverse_sigmoid
def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->RTDetr
class RTDetrFrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any other models than
    torchvision.models.resnet[18,34,50,101] produce nans.
    """

    def __init__(self, n):
        super().__init__()
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        # move reshapes to the beginning
        # to make it user-friendly
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        running_var = self.running_var.reshape(1, -1, 1, 1)
        running_mean = self.running_mean.reshape(1, -1, 1, 1)
        epsilon = 1e-5
        scale = weight * (running_var + epsilon).rsqrt()
        bias = bias - running_mean * scale
        return x * scale + bias


# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->RTDetr
def replace_batch_norm(model):
    r"""
    Recursively replace all `torch.nn.BatchNorm2d` with `RTDetrFrozenBatchNorm2d`.
Args: model (torch.nn.Module): input model """ for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = RTDetrFrozenBatchNorm2d(module.num_features) if module.weight.device != torch.device("meta"): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) def get_contrastive_denoising_training_group( targets, num_classes, num_queries, class_embed, num_denoising_queries=100, label_noise_ratio=0.5, box_noise_scale=1.0, ): """ Creates a contrastive denoising training group using ground-truth samples. It adds noise to labels and boxes. Args: targets (`list[dict]`): The target objects, each containing 'class_labels' and 'boxes' for objects in an image. num_classes (`int`): Total number of classes in the dataset. num_queries (`int`): Number of query slots in the transformer. class_embed (`callable`): A function or a model layer to embed class labels. num_denoising_queries (`int`, *optional*, defaults to 100): Number of denoising queries. label_noise_ratio (`float`, *optional*, defaults to 0.5): Ratio of noise applied to labels. box_noise_scale (`float`, *optional*, defaults to 1.0): Scale of noise applied to bounding boxes. Returns: `tuple` comprising various elements: - **input_query_class** (`torch.FloatTensor`) -- Class queries with applied label noise. - **input_query_bbox** (`torch.FloatTensor`) -- Bounding box queries with applied box noise. - **attn_mask** (`torch.FloatTensor`) -- Attention mask for separating denoising and reconstruction queries. - **denoising_meta_values** (`dict`) -- Metadata including denoising positive indices, number of groups, and split sizes. """ if num_denoising_queries <= 0: return None, None, None, None num_ground_truths = [len(t["class_labels"]) for t in targets] device = targets[0]["class_labels"].device max_gt_num = max(num_ground_truths) if max_gt_num == 0: return None, None, None, None num_groups_denoising_queries = num_denoising_queries // max_gt_num num_groups_denoising_queries = 1 if num_groups_denoising_queries == 0 else num_groups_denoising_queries # pad gt to max_num of a batch batch_size = len(num_ground_truths) input_query_class = torch.full([batch_size, max_gt_num], num_classes, dtype=torch.int32, device=device) input_query_bbox = torch.zeros([batch_size, max_gt_num, 4], device=device) pad_gt_mask = torch.zeros([batch_size, max_gt_num], dtype=torch.bool, device=device) for i in range(batch_size): num_gt = num_ground_truths[i] if num_gt > 0: input_query_class[i, :num_gt] = targets[i]["class_labels"] input_query_bbox[i, :num_gt] = targets[i]["boxes"] pad_gt_mask[i, :num_gt] = 1 # each group has positive and negative queries. 
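    # The padded ground truths are tiled 2 * num_groups times along the query axis: within every group of
    # 2 * max_gt_num queries, the first half serves as positive (reconstruction) queries and the second half as
    # negative queries (marked by `negative_gt_mask` below).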
input_query_class = input_query_class.tile([1, 2 * num_groups_denoising_queries]) input_query_bbox = input_query_bbox.tile([1, 2 * num_groups_denoising_queries, 1]) pad_gt_mask = pad_gt_mask.tile([1, 2 * num_groups_denoising_queries]) # positive and negative mask negative_gt_mask = torch.zeros([batch_size, max_gt_num * 2, 1], device=device) negative_gt_mask[:, max_gt_num:] = 1 negative_gt_mask = negative_gt_mask.tile([1, num_groups_denoising_queries, 1]) positive_gt_mask = 1 - negative_gt_mask # contrastive denoising training positive index positive_gt_mask = positive_gt_mask.squeeze(-1) * pad_gt_mask denoise_positive_idx = torch.nonzero(positive_gt_mask)[:, 1] denoise_positive_idx = torch.split( denoise_positive_idx, [n * num_groups_denoising_queries for n in num_ground_truths] ) # total denoising queries num_denoising_queries = torch_int(max_gt_num * 2 * num_groups_denoising_queries) if label_noise_ratio > 0: mask = torch.rand_like(input_query_class, dtype=torch.float) < (label_noise_ratio * 0.5) # randomly put a new one here new_label = torch.randint_like(mask, 0, num_classes, dtype=input_query_class.dtype) input_query_class = torch.where(mask & pad_gt_mask, new_label, input_query_class) if box_noise_scale > 0: known_bbox = center_to_corners_format(input_query_bbox) diff = torch.tile(input_query_bbox[..., 2:] * 0.5, [1, 1, 2]) * box_noise_scale rand_sign = torch.randint_like(input_query_bbox, 0, 2) * 2.0 - 1.0 rand_part = torch.rand_like(input_query_bbox) rand_part = (rand_part + 1.0) * negative_gt_mask + rand_part * (1 - negative_gt_mask) rand_part *= rand_sign known_bbox += rand_part * diff known_bbox.clip_(min=0.0, max=1.0) input_query_bbox = corners_to_center_format(known_bbox) input_query_bbox = inverse_sigmoid(input_query_bbox) input_query_class = class_embed(input_query_class) target_size = num_denoising_queries + num_queries attn_mask = torch.full([target_size, target_size], 0, dtype=torch.float, device=device) # match query cannot see the reconstruction attn_mask[num_denoising_queries:, :num_denoising_queries] = -torch.inf # reconstructions cannot see each other for i in range(num_groups_denoising_queries): idx_block_start = max_gt_num * 2 * i idx_block_end = max_gt_num * 2 * (i + 1) attn_mask[idx_block_start:idx_block_end, :idx_block_start] = -torch.inf attn_mask[idx_block_start:idx_block_end, idx_block_end:num_denoising_queries] = -torch.inf denoising_meta_values = { "dn_positive_idx": denoise_positive_idx, "dn_num_group": num_groups_denoising_queries, "dn_num_split": [num_denoising_queries, num_queries], } return input_query_class, input_query_bbox, attn_mask, denoising_meta_values class RTDetrConvEncoder(nn.Module): """ Convolutional backbone using the modeling_rt_detr_resnet.py. nn.BatchNorm2d layers are replaced by RTDetrFrozenBatchNorm2d as defined above. 
https://github.com/lyuwenyu/RT-DETR/blob/main/rtdetr_pytorch/src/nn/backbone/presnet.py#L142 """ def __init__(self, config): super().__init__() backbone = load_backbone(config) if config.freeze_backbone_batch_norms: # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.channels def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out class RTDetrConvNormLayer(nn.Module): def __init__(self, config, in_channels, out_channels, kernel_size, stride, padding=None, activation=None): super().__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, stride, padding=(kernel_size - 1) // 2 if padding is None else padding, bias=False, ) self.norm = nn.BatchNorm2d(out_channels, config.batch_norm_eps) self.activation = nn.Identity() if activation is None else ACT2CLS[activation]() def forward(self, hidden_state): hidden_state = self.conv(hidden_state) hidden_state = self.norm(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state class RTDetrEncoderLayer(nn.Module): def __init__(self, config: RTDetrConfig): super().__init__() self.normalize_before = config.normalize_before # self-attention self.self_attn = RTDetrMultiheadAttention( embed_dim=config.encoder_hidden_dim, num_heads=config.num_attention_heads, dropout=config.dropout, ) self.self_attn_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps) self.dropout = config.dropout self.activation_fn = ACT2FN[config.encoder_activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(config.encoder_hidden_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, config.encoder_hidden_dim) self.final_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, **kwargs, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. position_embeddings (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
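            Example (a minimal sketch with toy shapes, assuming default `RTDetrConfig` values rather than a
            pretrained checkpoint):

            ```python
            >>> import torch

            >>> layer = RTDetrEncoderLayer(RTDetrConfig())
            >>> hidden_states = torch.randn(1, 16, 256)  # (batch, seq_len, encoder_hidden_dim)
            >>> outputs = layer(hidden_states, attention_mask=None)
            >>> outputs[0].shape
            torch.Size([1, 16, 256])
            ```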
""" residual = hidden_states if self.normalize_before: hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if not self.normalize_before: hidden_states = self.self_attn_layer_norm(hidden_states) if self.normalize_before: hidden_states = self.final_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if not self.normalize_before: hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class RTDetrRepVggBlock(nn.Module): """ RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again". """ def __init__(self, config: RTDetrConfig): super().__init__() activation = config.activation_function hidden_channels = int(config.encoder_hidden_dim * config.hidden_expansion) self.conv1 = RTDetrConvNormLayer(config, hidden_channels, hidden_channels, 3, 1, padding=1) self.conv2 = RTDetrConvNormLayer(config, hidden_channels, hidden_channels, 1, 1, padding=0) self.activation = nn.Identity() if activation is None else ACT2CLS[activation]() def forward(self, x): y = self.conv1(x) + self.conv2(x) return self.activation(y) class RTDetrCSPRepLayer(nn.Module): """ Cross Stage Partial (CSP) network layer with RepVGG blocks. """ def __init__(self, config: RTDetrConfig): super().__init__() in_channels = config.encoder_hidden_dim * 2 out_channels = config.encoder_hidden_dim num_blocks = 3 activation = config.activation_function hidden_channels = int(out_channels * config.hidden_expansion) self.conv1 = RTDetrConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation) self.conv2 = RTDetrConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation) self.bottlenecks = nn.Sequential(*[RTDetrRepVggBlock(config) for _ in range(num_blocks)]) if hidden_channels != out_channels: self.conv3 = RTDetrConvNormLayer(config, hidden_channels, out_channels, 1, 1, activation=activation) else: self.conv3 = nn.Identity() def forward(self, hidden_state): hidden_state_1 = self.conv1(hidden_state) hidden_state_1 = self.bottlenecks(hidden_state_1) hidden_state_2 = self.conv2(hidden_state) return self.conv3(hidden_state_1 + hidden_state_2) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->RTDetr class RTDetrMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. 
""" def __init__(self, config: RTDetrConfig, num_heads: int, n_points: int): super().__init__() self.attn = MultiScaleDeformableAttention() if config.d_model % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}" ) dim_per_head = config.d_model // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in RTDetrMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." ) self.im2col_step = 64 self.d_model = config.d_model self.n_levels = config.num_feature_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2) self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points) self.value_proj = nn.Linear(config.d_model, config.d_model) self.output_proj = nn.Linear(config.d_model, config.d_model) self.disable_custom_kernels = config.disable_custom_kernels def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape total_elements = sum(height * width for height, width in spatial_shapes_list) if total_elements != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 num_coordinates = reference_points.shape[-1] if num_coordinates == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif num_coordinates == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") output = self.attn( value, spatial_shapes, 
            spatial_shapes_list,
            level_start_index,
            sampling_locations,
            attention_weights,
            self.im2col_step,
        )

        output = self.output_proj(output)

        return output, attention_weights


class RTDetrMultiheadAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper.

    Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper).
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {num_heads})."
            )
        self.scaling = self.head_dim**-0.5

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_embeddings: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, target_len, embed_dim = hidden_states.size()
        # add position embeddings to the hidden states before projecting to queries and keys; keep the original
        # hidden states around so that the values are projected without position information (also avoids an
        # unbound name when `position_embeddings` is None)
        hidden_states_original = hidden_states
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)

        # get queries, keys and values
        query_states = self.q_proj(hidden_states) * self.scaling
        key_states = self._reshape(self.k_proj(hidden_states), -1, batch_size)
        value_states = self._reshape(self.v_proj(hidden_states_original), -1, batch_size)

        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
        query_states = self._reshape(query_states, target_len, batch_size).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        source_len = key_states.size(1)

        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
            raise ValueError(
                f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
                f" {attn_weights.size()}"
            )

        # expand attention_mask
        if attention_mask is not None:
            # [seq_len, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
            attention_mask = attention_mask.expand(batch_size, 1, *attention_mask.size())

        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, target_len, source_len):
                raise ValueError(
                    f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
                    f" {attention_mask.size()}"
                )
            if attention_mask.dtype == torch.bool:
                attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(
                    attention_mask, -torch.inf
                )
            attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
            attn_weights = attn_weights.view(batch_size *
self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class RTDetrDecoderLayer(nn.Module): def __init__(self, config: RTDetrConfig): super().__init__() # self-attention self.self_attn = RTDetrMultiheadAttention( embed_dim=config.d_model, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.decoder_activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) # cross-attention self.encoder_attn = RTDetrMultiscaleDeformableAttention( config, num_heads=config.decoder_attention_heads, n_points=config.decoder_n_points, ) self.encoder_attn_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) # feedforward neural networks self.fc1 = nn.Linear(config.d_model, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, config.d_model) self.final_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(seq_len, batch, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
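            Example (a minimal sketch with toy shapes; a single feature level and illustrative config values are
            assumed, not a pretrained checkpoint):

            ```python
            >>> import torch

            >>> layer = RTDetrDecoderLayer(RTDetrConfig(num_feature_levels=1))
            >>> queries = torch.randn(1, 5, 256)  # (batch, num_queries, d_model)
            >>> memory = torch.randn(1, 64, 256)  # one flattened 8x8 feature level
            >>> outputs = layer(
            ...     queries,
            ...     reference_points=torch.rand(1, 5, 1, 4),  # (batch, num_queries, n_levels, 4), in [0, 1]
            ...     spatial_shapes=torch.tensor([[8, 8]]),
            ...     spatial_shapes_list=[(8, 8)],
            ...     level_start_index=torch.tensor([0]),
            ...     encoder_hidden_states=memory,
            ... )
            >>> outputs[0].shape
            torch.Size([1, 5, 256])
            ```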
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states # Cross-Attention cross_attn_weights = None hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs @auto_docstring class RTDetrPreTrainedModel(PreTrainedModel): config: RTDetrConfig base_model_prefix = "rt_detr" main_input_name = "pixel_values" _no_split_modules = [r"RTDetrHybridEncoder", r"RTDetrDecoderLayer"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (RTDetrForObjectDetection, RTDetrDecoder)): if module.class_embed is not None: for layer in module.class_embed: prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1) bias = float(-math.log((1 - prior_prob) / prior_prob)) nn.init.xavier_uniform_(layer.weight) nn.init.constant_(layer.bias, bias) if module.bbox_embed is not None: for layer in module.bbox_embed: nn.init.constant_(layer.layers[-1].weight, 0) nn.init.constant_(layer.layers[-1].bias, 0) elif isinstance(module, RTDetrMultiscaleDeformableAttention): nn.init.constant_(module.sampling_offsets.weight.data, 0.0) default_dtype = torch.get_default_dtype() thetas = torch.arange(module.n_heads, dtype=torch.int64).to(default_dtype) * ( 2.0 * math.pi / module.n_heads ) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(module.n_heads, 1, 1, 2) .repeat(1, module.n_levels, module.n_points, 1) ) for i in range(module.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(module.attention_weights.weight.data, 0.0) nn.init.constant_(module.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(module.value_proj.weight.data) nn.init.constant_(module.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(module.output_proj.weight.data) nn.init.constant_(module.output_proj.bias.data, 0.0) elif isinstance(module, RTDetrModel): prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1) bias = float(-math.log((1 - prior_prob) / prior_prob)) 
            nn.init.xavier_uniform_(module.enc_score_head.weight)
            nn.init.constant_(module.enc_score_head.bias, bias)
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

        if hasattr(module, "weight_embedding") and self.config.learn_initial_query:
            nn.init.xavier_uniform_(module.weight_embedding.weight)
        if hasattr(module, "denoising_class_embed") and self.config.num_denoising > 0:
            nn.init.xavier_uniform_(module.denoising_class_embed.weight)


class RTDetrEncoder(nn.Module):
    def __init__(self, config: RTDetrConfig):
        super().__init__()

        self.layers = nn.ModuleList([RTDetrEncoderLayer(config) for _ in range(config.encoder_layers)])

    def forward(self, src, src_mask=None, pos_embed=None, output_attentions: bool = False) -> torch.Tensor:
        hidden_states = src
        for layer in self.layers:
            layer_outputs = layer(
                hidden_states,
                attention_mask=src_mask,
                position_embeddings=pos_embed,
                output_attentions=output_attentions,
            )
            # each layer returns a tuple; feed only the hidden states to the next layer
            hidden_states = layer_outputs[0]
        return layer_outputs


class RTDetrHybridEncoder(nn.Module):
    """
    Hybrid encoder consisting of a set of `RTDetrEncoder` layers applied to selected feature levels, a top-down
    Feature Pyramid Network (FPN) and a bottom-up Path Aggregation Network (PAN). More details on the paper:
    https://huggingface.co/papers/2304.08069

    Args:
        config: RTDetrConfig
    """

    def __init__(self, config: RTDetrConfig):
        super().__init__()
        self.config = config
        self.in_channels = config.encoder_in_channels
        self.feat_strides = config.feat_strides
        self.encoder_hidden_dim = config.encoder_hidden_dim
        self.encode_proj_layers = config.encode_proj_layers
        self.positional_encoding_temperature = config.positional_encoding_temperature
        self.eval_size = config.eval_size
        self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]
        self.out_strides = self.feat_strides
        self.num_fpn_stages = len(self.in_channels) - 1
        self.num_pan_stages = len(self.in_channels) - 1
        activation = config.activation_function

        # encoder transformer
        self.encoder = nn.ModuleList([RTDetrEncoder(config) for _ in range(len(self.encode_proj_layers))])

        # top-down FPN
        self.lateral_convs = nn.ModuleList()
        self.fpn_blocks = nn.ModuleList()
        for _ in range(self.num_fpn_stages):
            lateral_conv = RTDetrConvNormLayer(
                config,
                in_channels=self.encoder_hidden_dim,
                out_channels=self.encoder_hidden_dim,
                kernel_size=1,
                stride=1,
                activation=activation,
            )
            fpn_block = RTDetrCSPRepLayer(config)
            self.lateral_convs.append(lateral_conv)
            self.fpn_blocks.append(fpn_block)

        # bottom-up PAN
        self.downsample_convs = nn.ModuleList()
        self.pan_blocks = nn.ModuleList()
        for _ in range(self.num_pan_stages):
            downsample_conv = RTDetrConvNormLayer(
                config,
                in_channels=self.encoder_hidden_dim,
                out_channels=self.encoder_hidden_dim,
                kernel_size=3,
                stride=2,
                activation=activation,
            )
            pan_block = RTDetrCSPRepLayer(config)
            self.downsample_convs.append(downsample_conv)
            self.pan_blocks.append(pan_block)

    @staticmethod
    def build_2d_sincos_position_embedding(
        width, height, embed_dim=256, temperature=10000.0, device="cpu", dtype=torch.float32
    ):
        grid_w = torch.arange(torch_int(width), device=device).to(dtype)
        grid_h = torch.arange(torch_int(height), device=device).to(dtype)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
        if embed_dim % 4 != 0:
            raise ValueError("Embed dimension must be divisible by 4 for 2D sin-cos position embedding")
        pos_dim = embed_dim // 4
        omega = torch.arange(pos_dim,
device=device).to(dtype) / pos_dim omega = 1.0 / (temperature**omega) out_w = grid_w.flatten()[..., None] @ omega[None] out_h = grid_h.flatten()[..., None] @ omega[None] return torch.concat([out_w.sin(), out_w.cos(), out_h.sin(), out_h.cos()], dim=1)[None, :, :] def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
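            Example (a minimal sketch; default `RTDetrConfig` values and random feature maps are assumed, not a
            pretrained checkpoint):

            ```python
            >>> import torch

            >>> encoder = RTDetrHybridEncoder(RTDetrConfig())
            >>> feats = [torch.randn(1, 256, 64, 64), torch.randn(1, 256, 32, 32), torch.randn(1, 256, 16, 16)]
            >>> outputs = encoder(inputs_embeds=feats)
            >>> [tuple(f.shape) for f in outputs.last_hidden_state]
            [(1, 256, 64, 64), (1, 256, 32, 32), (1, 256, 16, 16)]
            ```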
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # encoder if self.config.encoder_layers > 0: for i, enc_ind in enumerate(self.encode_proj_layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states[enc_ind],) height, width = hidden_states[enc_ind].shape[2:] # flatten [batch, channel, height, width] to [batch, height*width, channel] src_flatten = hidden_states[enc_ind].flatten(2).permute(0, 2, 1) if self.training or self.eval_size is None: pos_embed = self.build_2d_sincos_position_embedding( width, height, self.encoder_hidden_dim, self.positional_encoding_temperature, device=src_flatten.device, dtype=src_flatten.dtype, ) else: pos_embed = None layer_outputs = self.encoder[i]( src_flatten, pos_embed=pos_embed, output_attentions=output_attentions, ) hidden_states[enc_ind] = ( layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous() ) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states[enc_ind],) # top-down FPN fpn_feature_maps = [hidden_states[-1]] for idx, (lateral_conv, fpn_block) in enumerate(zip(self.lateral_convs, self.fpn_blocks)): backbone_feature_map = hidden_states[self.num_fpn_stages - idx - 1] top_fpn_feature_map = fpn_feature_maps[-1] # apply lateral block top_fpn_feature_map = lateral_conv(top_fpn_feature_map) fpn_feature_maps[-1] = top_fpn_feature_map # apply fpn block top_fpn_feature_map = F.interpolate(top_fpn_feature_map, scale_factor=2.0, mode="nearest") fused_feature_map = torch.concat([top_fpn_feature_map, backbone_feature_map], dim=1) new_fpn_feature_map = fpn_block(fused_feature_map) fpn_feature_maps.append(new_fpn_feature_map) fpn_feature_maps = fpn_feature_maps[::-1] # bottom-up PAN pan_feature_maps = [fpn_feature_maps[0]] for idx, (downsample_conv, pan_block) in enumerate(zip(self.downsample_convs, self.pan_blocks)): top_pan_feature_map = pan_feature_maps[-1] fpn_feature_map = fpn_feature_maps[idx + 1] downsampled_feature_map = downsample_conv(top_pan_feature_map) fused_feature_map = torch.concat([downsampled_feature_map, fpn_feature_map], dim=1) new_pan_feature_map = pan_block(fused_feature_map) pan_feature_maps.append(new_pan_feature_map) if not return_dict: return tuple(v for v in [pan_feature_maps, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=pan_feature_maps, hidden_states=encoder_states, attentions=all_attentions ) class RTDetrDecoder(RTDetrPreTrainedModel): def __init__(self, config: RTDetrConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([RTDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) self.query_pos_head = RTDetrMLPPredictionHead(config, 4, 2 * config.d_model, config.d_model, num_layers=2) # hack implementation for iterative bounding box refinement and two-stage Deformable DETR self.bbox_embed = None self.class_embed = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, 
        position_embeddings=None,
        reference_points=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        level_start_index=None,
        valid_ratios=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
                The query embeddings that are passed into the decoder.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
                in `[0, 1]`:
                - 1 for pixels that are real (i.e. **not masked**),
                - 0 for pixels that are padding (i.e. **masked**).
            position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each self-attention layer.
            reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` when `as_two_stage=True`, else of shape `(batch_size, num_queries, 2)`, *optional*):
                Reference points in range `[0, 1]`, with top-left at (0, 0) and bottom-right at (1, 1), including the
                padding area.
            spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
                Spatial shapes of the feature maps.
            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
                Indexes for the start of each feature level. In range `[0, sequence_length]`.
            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*):
                Ratio of valid area in each feature level.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
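            Example (a minimal sketch with toy shapes; a single feature level and illustrative config values are
            assumed, not a pretrained checkpoint):

            ```python
            >>> import torch

            >>> decoder = RTDetrDecoder(RTDetrConfig(num_feature_levels=1, decoder_layers=2))
            >>> out = decoder(
            ...     inputs_embeds=torch.randn(1, 5, 256),  # query embeddings
            ...     encoder_hidden_states=torch.randn(1, 64, 256),  # one flattened 8x8 feature level
            ...     reference_points=torch.randn(1, 5, 4),  # unactivated; a sigmoid is applied inside
            ...     spatial_shapes=torch.tensor([[8, 8]]),
            ...     spatial_shapes_list=[(8, 8)],
            ...     level_start_index=torch.tensor([0]),
            ... )
            >>> out.intermediate_hidden_states.shape
            torch.Size([1, 2, 5, 256])
            ```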
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = () intermediate_reference_points = () intermediate_logits = () reference_points = F.sigmoid(reference_points) # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L252 for idx, decoder_layer in enumerate(self.layers): reference_points_input = reference_points.unsqueeze(2) position_embeddings = self.query_pos_head(reference_points) if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] # hack implementation for iterative bounding box refinement if self.bbox_embed is not None: predicted_corners = self.bbox_embed[idx](hidden_states) new_reference_points = F.sigmoid(predicted_corners + inverse_sigmoid(reference_points)) reference_points = new_reference_points.detach() intermediate += (hidden_states,) intermediate_reference_points += ( (new_reference_points,) if self.bbox_embed is not None else (reference_points,) ) if self.class_embed is not None: logits = self.class_embed[idx](hidden_states) intermediate_logits += (logits,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Keep batch_size as first dimension intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) if self.class_embed is not None: intermediate_logits = torch.stack(intermediate_logits, dim=1) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, intermediate, intermediate_logits, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return RTDetrDecoderOutput( last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_logits=intermediate_logits, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py class RTDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. 
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py Origin from https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_paddle/ppdet/modeling/transformers/utils.py#L453 """ def __init__(self, config, input_dim, d_model, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [d_model] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x @auto_docstring( custom_intro=""" RT-DETR Model (consisting of a backbone and encoder-decoder) outputting raw hidden states without any head on top. """ ) class RTDetrModel(RTDetrPreTrainedModel): def __init__(self, config: RTDetrConfig): super().__init__(config) # Create backbone self.backbone = RTDetrConvEncoder(config) intermediate_channel_sizes = self.backbone.intermediate_channel_sizes # Create encoder input projection layers # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/hybrid_encoder.py#L212 num_backbone_outs = len(intermediate_channel_sizes) encoder_input_proj_list = [] for _ in range(num_backbone_outs): in_channels = intermediate_channel_sizes[_] encoder_input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.encoder_hidden_dim, kernel_size=1, bias=False), nn.BatchNorm2d(config.encoder_hidden_dim), ) ) self.encoder_input_proj = nn.ModuleList(encoder_input_proj_list) # Create encoder self.encoder = RTDetrHybridEncoder(config) # denoising part if config.num_denoising > 0: self.denoising_class_embed = nn.Embedding( config.num_labels + 1, config.d_model, padding_idx=config.num_labels ) # decoder embedding if config.learn_initial_query: self.weight_embedding = nn.Embedding(config.num_queries, config.d_model) # encoder head self.enc_output = nn.Sequential( nn.Linear(config.d_model, config.d_model), nn.LayerNorm(config.d_model, eps=config.layer_norm_eps), ) self.enc_score_head = nn.Linear(config.d_model, config.num_labels) self.enc_bbox_head = RTDetrMLPPredictionHead(config, config.d_model, config.d_model, 4, num_layers=3) # init encoder output anchors and valid_mask if config.anchor_image_size: self.anchors, self.valid_mask = self.generate_anchors(dtype=self.dtype) # Create decoder input projection layers # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L412 num_backbone_outs = len(config.decoder_in_channels) decoder_input_proj_list = [] for _ in range(num_backbone_outs): in_channels = config.decoder_in_channels[_] decoder_input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1, bias=False), nn.BatchNorm2d(config.d_model, config.batch_norm_eps), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): decoder_input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(config.d_model, config.batch_norm_eps), ) ) in_channels = config.d_model self.decoder_input_proj = nn.ModuleList(decoder_input_proj_list) # decoder self.decoder = RTDetrDecoder(config) self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for param in self.backbone.parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for param 
in self.backbone.parameters(): param.requires_grad_(True) @compile_compatible_method_lru_cache(maxsize=32) def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device="cpu", dtype=torch.float32): if spatial_shapes is None: spatial_shapes = [ [int(self.config.anchor_image_size[0] / s), int(self.config.anchor_image_size[1] / s)] for s in self.config.feat_strides ] anchors = [] for level, (height, width) in enumerate(spatial_shapes): grid_y, grid_x = torch.meshgrid( torch.arange(end=height, device=device).to(dtype), torch.arange(end=width, device=device).to(dtype), indexing="ij", ) grid_xy = torch.stack([grid_x, grid_y], -1) grid_xy = grid_xy.unsqueeze(0) + 0.5 grid_xy[..., 0] /= width grid_xy[..., 1] /= height wh = torch.ones_like(grid_xy) * grid_size * (2.0**level) anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4)) # define the valid range for anchor coordinates eps = 1e-2 anchors = torch.concat(anchors, 1) valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True) anchors = torch.log(anchors / (1 - anchors)) anchors = torch.where(valid_mask, anchors, torch.tensor(torch.finfo(dtype).max, dtype=dtype, device=device)) return anchors, valid_mask @auto_docstring def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[list[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], RTDetrModelOutput]: r""" inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. 
Examples: ```python >>> from transformers import AutoImageProcessor, RTDetrModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("PekingU/rtdetr_r50vd") >>> model = RTDetrModel.from_pretrained("PekingU/rtdetr_r50vd") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device) features = self.backbone(pixel_values, pixel_mask) proj_feats = [self.encoder_input_proj[level](source) for level, (source, mask) in enumerate(features)] if encoder_outputs is None: encoder_outputs = self.encoder( proj_feats, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if output_hidden_states else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else encoder_outputs[1] if output_attentions else None, ) # Equivalent to def _get_encoder_input # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L412 sources = [] for level, source in enumerate(encoder_outputs[0]): sources.append(self.decoder_input_proj[level](source)) # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage if self.config.num_feature_levels > len(sources): _len_sources = len(sources) sources.append(self.decoder_input_proj[_len_sources](encoder_outputs[0])[-1]) for i in range(_len_sources + 1, self.config.num_feature_levels): sources.append(self.decoder_input_proj[i](encoder_outputs[0][-1])) # Prepare encoder inputs (by flattening) source_flatten = [] spatial_shapes_list = [] spatial_shapes = torch.empty((len(sources), 2), device=device, dtype=torch.long) for level, source in enumerate(sources): height, width = source.shape[-2:] spatial_shapes[level, 0] = height spatial_shapes[level, 1] = width spatial_shapes_list.append((height, width)) source = source.flatten(2).transpose(1, 2) source_flatten.append(source) source_flatten = torch.cat(source_flatten, 1) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) # prepare denoising training if self.training and self.config.num_denoising > 0 and labels is not None: ( denoising_class, denoising_bbox_unact, attention_mask, denoising_meta_values, ) = get_contrastive_denoising_training_group( targets=labels, num_classes=self.config.num_labels, num_queries=self.config.num_queries, class_embed=self.denoising_class_embed, num_denoising_queries=self.config.num_denoising, 
label_noise_ratio=self.config.label_noise_ratio, box_noise_scale=self.config.box_noise_scale, ) else: denoising_class, denoising_bbox_unact, attention_mask, denoising_meta_values = None, None, None, None batch_size = len(source_flatten) device = source_flatten.device dtype = source_flatten.dtype # prepare input for decoder if self.training or self.config.anchor_image_size is None: # Pass spatial_shapes as tuple to make it hashable and make sure # lru_cache is working for generate_anchors() spatial_shapes_tuple = tuple(spatial_shapes_list) anchors, valid_mask = self.generate_anchors(spatial_shapes_tuple, device=device, dtype=dtype) else: anchors, valid_mask = self.anchors, self.valid_mask anchors, valid_mask = anchors.to(device, dtype), valid_mask.to(device, dtype) # use the valid_mask to selectively retain values in the feature map where the mask is `True` memory = valid_mask.to(source_flatten.dtype) * source_flatten output_memory = self.enc_output(memory) enc_outputs_class = self.enc_score_head(output_memory) enc_outputs_coord_logits = self.enc_bbox_head(output_memory) + anchors _, topk_ind = torch.topk(enc_outputs_class.max(-1).values, self.config.num_queries, dim=1) reference_points_unact = enc_outputs_coord_logits.gather( dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_coord_logits.shape[-1]) ) enc_topk_bboxes = F.sigmoid(reference_points_unact) if denoising_bbox_unact is not None: reference_points_unact = torch.concat([denoising_bbox_unact, reference_points_unact], 1) enc_topk_logits = enc_outputs_class.gather( dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_class.shape[-1]) ) # extract region features if self.config.learn_initial_query: target = self.weight_embedding.tile([batch_size, 1, 1]) else: target = output_memory.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, output_memory.shape[-1])) target = target.detach() if denoising_class is not None: target = torch.concat([denoising_class, target], 1) init_reference_points = reference_points_unact.detach() # decoder decoder_outputs = self.decoder( inputs_embeds=target, encoder_hidden_states=source_flatten, encoder_attention_mask=attention_mask, reference_points=init_reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: enc_outputs = tuple( value for value in [enc_topk_logits, enc_topk_bboxes, enc_outputs_class, enc_outputs_coord_logits] if value is not None ) dn_outputs = tuple(value if value is not None else None for value in [denoising_meta_values]) tuple_outputs = decoder_outputs + encoder_outputs + (init_reference_points,) + enc_outputs + dn_outputs return tuple_outputs return RTDetrModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_logits=decoder_outputs.intermediate_logits, intermediate_reference_points=decoder_outputs.intermediate_reference_points, intermediate_predicted_corners=decoder_outputs.intermediate_predicted_corners, initial_reference_points=decoder_outputs.initial_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, 
init_reference_points=init_reference_points, enc_topk_logits=enc_topk_logits, enc_topk_bboxes=enc_topk_bboxes, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, denoising_meta_values=denoising_meta_values, ) @auto_docstring( custom_intro=""" RT-DETR Model (consisting of a backbone and encoder-decoder) outputting bounding boxes and logits to be further decoded into scores and classes. """ ) class RTDetrForObjectDetection(RTDetrPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _tied_weights_keys = ["bbox_embed", "class_embed"] # We can't initialize the model on meta device as some weights are modified during the initialization _no_split_modules = None def __init__(self, config: RTDetrConfig): super().__init__(config) # RTDETR encoder-decoder model self.model = RTDetrModel(config) # Detection heads on top self.class_embed = partial(nn.Linear, config.d_model, config.num_labels) self.bbox_embed = partial(RTDetrMLPPredictionHead, config, config.d_model, config.d_model, 4, num_layers=3) # if two-stage, the last class_embed and bbox_embed is for region proposal generation num_pred = config.decoder_layers if config.with_box_refine: self.class_embed = _get_clones(self.class_embed, num_pred) self.bbox_embed = _get_clones(self.bbox_embed, num_pred) else: self.class_embed = nn.ModuleList([self.class_embed() for _ in range(num_pred)]) self.bbox_embed = nn.ModuleList([self.bbox_embed() for _ in range(num_pred)]) # hack implementation for iterative bounding box refinement self.model.decoder.class_embed = self.class_embed self.model.decoder.bbox_embed = self.bbox_embed # Initialize weights and apply final processing self.post_init() @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class, outputs_coord)] @auto_docstring def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[list[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.FloatTensor], RTDetrObjectDetectionOutput]: r""" inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). 
The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Examples: ```python >>> from transformers import RTDetrImageProcessor, RTDetrForObjectDetection >>> from PIL import Image >>> import requests >>> import torch >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = RTDetrImageProcessor.from_pretrained("PekingU/rtdetr_r50vd") >>> model = RTDetrForObjectDetection.from_pretrained("PekingU/rtdetr_r50vd") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 300, 80] >>> boxes = outputs.pred_boxes >>> list(boxes.shape) [1, 300, 4] >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected sofa with confidence 0.97 at location [0.14, 0.38, 640.13, 476.21] Detected cat with confidence 0.96 at location [343.38, 24.28, 640.14, 371.5] Detected cat with confidence 0.958 at location [13.23, 54.18, 318.98, 472.22] Detected remote with confidence 0.951 at location [40.11, 73.44, 175.96, 118.48] Detected remote with confidence 0.924 at location [333.73, 76.58, 369.97, 186.99] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( pixel_values, pixel_mask=pixel_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) denoising_meta_values = ( outputs.denoising_meta_values if return_dict else outputs[-1] if self.training else None ) outputs_class = outputs.intermediate_logits if return_dict else outputs[2] outputs_coord = outputs.intermediate_reference_points if return_dict else outputs[3] predicted_corners = outputs.intermediate_predicted_corners if return_dict else outputs[4] initial_reference_points = outputs.initial_reference_points if return_dict else outputs[5] logits = outputs_class[:, -1] pred_boxes = outputs_coord[:, -1] loss, loss_dict, auxiliary_outputs, enc_topk_logits, enc_topk_bboxes = None, None, None, None, None if labels is not None: enc_topk_logits = outputs.enc_topk_logits if return_dict else outputs[-5] enc_topk_bboxes = outputs.enc_topk_bboxes if return_dict else outputs[-4] loss, loss_dict, auxiliary_outputs = self.loss_function( logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord, enc_topk_logits=enc_topk_logits, enc_topk_bboxes=enc_topk_bboxes, denoising_meta_values=denoising_meta_values, 
predicted_corners=predicted_corners, initial_reference_points=initial_reference_points, **kwargs, ) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + (auxiliary_outputs,) + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output return RTDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_logits=outputs.intermediate_logits, intermediate_reference_points=outputs.intermediate_reference_points, intermediate_predicted_corners=outputs.intermediate_predicted_corners, initial_reference_points=outputs.initial_reference_points, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, init_reference_points=outputs.init_reference_points, enc_topk_logits=outputs.enc_topk_logits, enc_topk_bboxes=outputs.enc_topk_bboxes, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, denoising_meta_values=outputs.denoising_meta_values, ) __all__ = [ "RTDetrForObjectDetection", "RTDetrModel", "RTDetrPreTrainedModel", ]
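# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): how `generate_anchors` above
# turns a single feature-map grid into unactivated box priors. Shapes and
# values are examples only; the real method iterates over all feature levels.
# ---------------------------------------------------------------------------
# import torch
#
# height, width, grid_size, level = 4, 4, 0.05, 0
# grid_y, grid_x = torch.meshgrid(torch.arange(height), torch.arange(width), indexing="ij")
# # centers of each grid cell, normalized to (0, 1)
# grid_xy = (torch.stack([grid_x, grid_y], -1) + 0.5) / torch.tensor([width, height])
# # constant width/height prior, doubled at each coarser level
# wh = torch.full_like(grid_xy, grid_size * 2.0**level)
# anchors = torch.concat([grid_xy, wh], -1).reshape(-1, 4)  # (cx, cy, w, h) in (0, 1)
# # inverse sigmoid, matching `torch.log(anchors / (1 - anchors))` in the model;
# # the decoder adds its bbox-head offsets to these logits before the final sigmoid
# unact_anchors = torch.log(anchors / (1 - anchors))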
transformers/src/transformers/models/rt_detr/modeling_rt_detr.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr/modeling_rt_detr.py", "repo_id": "transformers", "token_count": 41361 }
521
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for SAM.""" import math from copy import deepcopy from itertools import product from typing import Any, Optional, Union import numpy as np import torch from ...image_processing_utils import BatchFeature, get_size_dict from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, ) from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch from torch.nn import functional as F if is_torchvision_v2_available(): from torchvision.ops.boxes import batched_nms from torchvision.transforms.v2 import functional as F_t elif is_torchvision_available(): from torchvision.ops.boxes import batched_nms from torchvision.transforms import functional as F_t class SamFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): r""" do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with zeros. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. mask_size (`dict[str, int]`, *optional*): The size `{"longest_edge": int}` to resize the segmentation maps to. mask_pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to pad the segmentation maps to. Must be larger than any segmentation map size provided for preprocessing. 
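    Example (an illustrative sketch of passing these kwargs; the checkpoint id is only a placeholder for any SAM
    checkpoint, and `image` stands for any supported image input):

    ```python
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("facebook/sam-vit-base", use_fast=True)
    inputs = processor(images=image, do_pad=True, pad_size={"height": 1024, "width": 1024}, return_tensors="pt")
    ```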
""" mask_size: Optional[dict[str, int]] do_pad: Optional[bool] pad_size: Optional[dict[str, int]] mask_pad_size: Optional[dict[str, int]] @auto_docstring class SamImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD size = {"longest_edge": 1024} mask_size = {"longest_edge": 256} do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True valid_kwargs = SamFastImageProcessorKwargs do_pad = True pad_size = {"height": 1024, "width": 1024} mask_pad_size = {"height": 256, "width": 256} def __init__(self, **kwargs: Unpack[SamFastImageProcessorKwargs]): super().__init__(**kwargs) def pad_image(self, images: "torch.Tensor", pad_size: SizeDict): """Pad images to the specified size.""" output_height, output_width = pad_size.height, pad_size.width input_height, input_width = images.shape[-2:] pad_width = output_width - input_width pad_height = output_height - input_height padding = (0, 0, pad_width, pad_height) return F_t.pad(images, padding) def _get_preprocess_shape(self, old_shape: tuple[int, int], longest_edge: int): """ Compute the output size given input size and target long side length. """ oldh, oldw = old_shape scale = longest_edge * 1.0 / max(oldh, oldw) newh, neww = oldh * scale, oldw * scale newh = int(newh + 0.5) neww = int(neww + 0.5) return (newh, neww) def resize( self, image: "torch.Tensor", size: SizeDict, interpolation: Optional["F_t.InterpolationMode"], **kwargs ) -> "torch.Tensor": """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest edge of the image will be resized to the specified size, while the other edge will be resized to maintain the aspect ratio. interpolation: `F_t.InterpolationMode` filter to use when resizing the image e.g. `F_t.InterpolationMode.BICUBIC`. Returns: `torch.Tensor`: The resized image. """ if not size.longest_edge: raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}") input_size = image.shape[-2:] output_height, output_width = self._get_preprocess_shape(input_size, size.longest_edge) return super().resize( image, size=SizeDict(height=output_height, width=output_width), interpolation=interpolation, **kwargs ) def _further_process_kwargs( self, size: Optional[SizeDict] = None, pad_size: Optional[SizeDict] = None, mask_size: Optional[SizeDict] = None, mask_pad_size: Optional[SizeDict] = None, default_to_square: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> dict: """ Update kwargs that need further processing before being validated Can be overridden by subclasses to customize the processing of kwargs. 
""" if kwargs is None: kwargs = {} if size is not None: size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square)) if pad_size is not None: pad_size = SizeDict(**get_size_dict(pad_size, param_name="pad_size")) if mask_size is not None: mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size")) if mask_pad_size is not None: mask_pad_size = SizeDict(**get_size_dict(mask_pad_size, param_name="mask_pad_size")) if isinstance(image_mean, list): image_mean = tuple(image_mean) if isinstance(image_std, list): image_std = tuple(image_std) if data_format is None: data_format = ChannelDimension.FIRST kwargs["size"] = size kwargs["pad_size"] = pad_size kwargs["mask_size"] = mask_size kwargs["mask_pad_size"] = mask_pad_size kwargs["default_to_square"] = default_to_square kwargs["image_mean"] = image_mean kwargs["image_std"] = image_std kwargs["data_format"] = data_format return kwargs @auto_docstring def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, **kwargs: Unpack[SamFastImageProcessorKwargs], ) -> BatchFeature: r""" segmentation_maps (`ImageInput`, *optional*): The segmentation maps to preprocess. """ return super().preprocess(images, segmentation_maps, **kwargs) def _preprocess_image_like_inputs( self, images: ImageInput, segmentation_maps: Optional[ImageInput], do_convert_rgb: bool, input_data_format: ChannelDimension, device: Optional[Union[str, "torch.device"]] = None, **kwargs: Unpack[SamFastImageProcessorKwargs], ) -> BatchFeature: """ Preprocess image-like inputs. """ images = self._prepare_image_like_inputs( images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device ) original_sizes = [image.shape[-2:] for image in images] images_kwargs = kwargs.copy() pixel_values = self._preprocess(images, **images_kwargs) reshaped_input_sizes = [image.shape[-2:] for image in images] data = { "pixel_values": pixel_values, "original_sizes": original_sizes, "reshaped_input_sizes": reshaped_input_sizes, } if segmentation_maps is not None: processed_segmentation_maps = self._prepare_image_like_inputs( images=segmentation_maps, expected_ndims=2, do_convert_rgb=False, input_data_format=ChannelDimension.FIRST, ) segmentation_maps_kwargs = kwargs.copy() segmentation_maps_kwargs.update( { "do_normalize": False, "do_rescale": False, "interpolation": F_t.InterpolationMode.NEAREST_EXACT if is_torchvision_v2_available() else F_t.InterpolationMode.NEAREST, "size": segmentation_maps_kwargs.pop("mask_size"), "pad_size": segmentation_maps_kwargs.pop("mask_pad_size"), } ) processed_segmentation_maps = self._preprocess( images=processed_segmentation_maps, **segmentation_maps_kwargs ) data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64) return BatchFeature(data=data, tensor_type=kwargs["return_tensors"]) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, interpolation: Optional["F_t.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, pad_size: SizeDict, disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> Union["torch.Tensor", list["torch.Tensor"]]: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if 
do_resize: stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) if do_pad: stacked_images = self.pad_image(stacked_images, pad_size) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return processed_images def generate_crop_boxes( self, image: "torch.Tensor", target_size, crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[list[int]] = 1, device: Optional["torch.device"] = None, ): """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (`torch.Tensor`): Input original image target_size (`int`): Target size of the resized image crop_n_layers (`int`, *optional*, defaults to 0): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`float`, *optional*, defaults to 512/1500): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*, defaults to 32): Number of points to sample from each crop. crop_n_points_downscale_factor (`list[int]`, *optional*, defaults to 1): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. device (`torch.device`, *optional*, defaults to None): Device to use for the computation. If None, cpu will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ image = self._process_image(image) crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes( image, target_size, crop_n_layers, overlap_ratio, points_per_crop, crop_n_points_downscale_factor, ) if device is None: device = torch.device("cpu") crop_boxes = crop_boxes.to(device) points_per_crop = points_per_crop.to(device) # cropped_images stays as torch.Tensor input_labels = input_labels.to(device) return crop_boxes, points_per_crop, cropped_images, input_labels def filter_masks( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. 
        The method also converts the predicted masks to bounding boxes and pads the predicted masks if necessary.

        Args:
            masks (`torch.Tensor`):
                Input masks.
            iou_scores (`torch.Tensor`):
                List of IoU scores.
            original_size (`tuple[int,int]`):
                Size of the original image.
            cropped_box_image (`torch.Tensor`):
                The crop box of the image, in `[left, top, right, bottom]` format.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.
        """
        original_height, original_width = original_size
        iou_scores = iou_scores.flatten(0, 1)
        masks = masks.flatten(0, 1)

        if masks.shape[0] != iou_scores.shape[0]:
            raise ValueError("masks and iou_scores must have the same batch size.")

        if masks.device != iou_scores.device:
            iou_scores = iou_scores.to(masks.device)

        batch_size = masks.shape[0]

        keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)

        if pred_iou_thresh > 0.0:
            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)

        # compute stability score
        if stability_score_thresh > 0.0:
            stability_scores = _compute_stability_score(masks, mask_threshold, stability_score_offset)
            keep_mask = keep_mask & (stability_scores > stability_score_thresh)

        scores = iou_scores[keep_mask]
        masks = masks[keep_mask]

        # binarize masks
        masks = masks > mask_threshold
        converted_boxes = _batched_mask_to_box(masks)

        keep_mask = ~_is_box_near_crop_edge(
            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
        )

        scores = scores[keep_mask]
        masks = masks[keep_mask]
        converted_boxes = converted_boxes[keep_mask]

        masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
        # conversion to rle is necessary to run non-maximum suppression
        masks = _mask_to_rle(masks)

        return masks, scores, converted_boxes

    def post_process_masks(
        self,
        masks,
        original_sizes,
        reshaped_input_sizes,
        mask_threshold=0.0,
        binarize=True,
        pad_size=None,
    ):
        """
        Remove padding and upscale masks to the original image size.

        Args:
            masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in
                (height, width) format.
            reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for binarizing the masks.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            pad_size (`int`, *optional*, defaults to `self.pad_size`):
                The target size the images were padded to before being passed to the model. If None, the target size
                is assumed to be the processor's `pad_size`.

        Returns:
            (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
            is given by original_size.
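        Example (an illustrative sketch; `model_inputs` and `outputs` are placeholders for the processor inputs and
        `SamModel` predictions, not part of this method):

        ```python
        # model_inputs = image_processor(images=raw_images, return_tensors="pt")
        # outputs = model(**model_inputs)
        masks = image_processor.post_process_masks(
            masks=outputs.pred_masks.cpu(),
            original_sizes=model_inputs["original_sizes"],
            reshaped_input_sizes=model_inputs["reshaped_input_sizes"],
        )
        # one boolean mask tensor per image, each at its original (height, width)
        ```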
""" pad_size = self.size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) if isinstance(original_sizes, (torch.Tensor, np.ndarray)): original_sizes = original_sizes.tolist() if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)): reshaped_input_sizes = reshaped_input_sizes.tolist() output_masks = [] for i, original_size in enumerate(original_sizes): if isinstance(masks[i], np.ndarray): masks[i] = torch.from_numpy(masks[i]) elif not isinstance(masks[i], torch.Tensor): raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`") interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False) interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]] interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False) if binarize: interpolated_mask = interpolated_mask > mask_threshold output_masks.append(interpolated_mask) return output_masks def post_process_for_mask_generation(self, all_masks, all_scores, all_boxes, crops_nms_thresh): """ Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks. Args: all_masks (`torch.Tensor`): List of all predicted segmentation masks all_scores (`torch.Tensor`): List of all predicted iou scores all_boxes (`torch.Tensor`): List of all bounding boxes of the predicted masks crops_nms_thresh (`float`): Threshold for NMS (Non Maximum Suppression) algorithm. """ return _post_process_for_mask_generation(all_masks, all_scores, all_boxes, crops_nms_thresh) def _compute_stability_score(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int): # One mask is always contained inside the other. # Save memory by preventing unnecessary cast to torch.int64 intersections = ( (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) ) unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) stability_scores = intersections / unions return stability_scores def _mask_to_rle(input_mask: "torch.Tensor"): """ Encodes masks the run-length encoding (RLE), in the format expected by pycoco tools. """ # Put in fortran order and flatten height and width batch_size, height, width = input_mask.shape input_mask = input_mask.permute(0, 2, 1).flatten(1) # Compute change indices diff = input_mask[:, 1:] ^ input_mask[:, :-1] change_indices = diff.nonzero() # Encode run length out = [] for i in range(batch_size): cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1 if len(cur_idxs) == 0: # No changes => either all 0 or all 1 # If the entire mask is 0, RLE is [height*width] or if the entire mask is 1, RLE is [0, height*width]. if input_mask[i, 0] == 0: out.append({"size": [height, width], "counts": [height * width]}) else: out.append({"size": [height, width], "counts": [0, height * width]}) continue btw_idxs = cur_idxs[1:] - cur_idxs[:-1] counts = [] if input_mask[i, 0] == 0 else [0] counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1].item()] out.append({"size": [height, width], "counts": counts}) return out def _batched_mask_to_box(masks: "torch.Tensor"): """ Computes the bounding boxes around the given input masks. 
The bounding boxes are in the XYXY format which corresponds the following required indices: - LEFT: left hand side of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BOTTOM: bottom of the bounding box Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape is channel_1 x channel_2 x ... x 4. Args: - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`) """ # torch.max below raises an error on empty inputs, just skip in this case if torch.numel(masks) == 0: return torch.zeros(*masks.shape[:-2], 4, device=masks.device) # Normalize shape to Cxheightxwidth shape = masks.shape height, width = shape[-2:] # Get top and bottom edges in_height, _ = torch.max(masks, dim=-1) in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :] bottom_edges, _ = torch.max(in_height_coords, dim=-1) in_height_coords = in_height_coords + height * (~in_height) top_edges, _ = torch.min(in_height_coords, dim=-1) # Get left and right edges in_width, _ = torch.max(masks, dim=-2) in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :] right_edges, _ = torch.max(in_width_coords, dim=-1) in_width_coords = in_width_coords + width * (~in_width) left_edges, _ = torch.min(in_width_coords, dim=-1) # If the mask is empty the right edge will be to the left of the left edge. # Replace these boxes with [0, 0, 0, 0] empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) out = out * (~empty_filter).unsqueeze(-1) # Return to original shape out = out.reshape(*shape[:-2], 4) return out def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0): """Filter masks at the edge of a crop, but not at the edge of the original image.""" crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) left, top, _, _ = crop_box offset = torch.tensor([[left, top, left, top]], device=boxes.device) # Check if boxes has a channel dimension if len(boxes.shape) == 3: offset = offset.unsqueeze(1) boxes = (boxes + offset).float() near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) return torch.any(near_crop_edge, dim=1) def _pad_masks(masks, crop_box: list[int], orig_height: int, orig_width: int): left, top, right, bottom = crop_box if left == 0 and top == 0 and right == orig_width and bottom == orig_height: return masks # Coordinate transform masks pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top) pad = (left, pad_x - left, top, pad_y - top) return torch.nn.functional.pad(masks, pad, value=0) def _generate_crop_boxes( image, target_size: int, # Is it tuple here? crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[list[int]] = 1, ) -> tuple[list[list[int]], list[int]]: """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]): Image to generate crops for. target_size (`int`): Size of the smallest crop. 
crop_n_layers (`int`, *optional*): If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`int`, *optional*): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*): Number of points to sample per crop. crop_n_points_downscale_factor (`int`, *optional*): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if isinstance(image, list): raise ValueError("Only one image is allowed for crop generation.") original_size = image.shape[-2:] points_grid = [] for i in range(crop_n_layers + 1): n_points = int(points_per_crop / (crop_n_points_downscale_factor**i)) points_grid.append(_build_point_grid(n_points)) crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size) cropped_images, point_grid_per_crop = _generate_crop_images( crop_boxes, image, points_grid, layer_idxs, target_size, original_size ) crop_boxes = torch.tensor(crop_boxes) crop_boxes = crop_boxes.float() points_per_crop = torch.stack(point_grid_per_crop) points_per_crop = points_per_crop.unsqueeze(0).permute(0, 2, 1, 3) cropped_images = torch.stack(cropped_images) input_labels = torch.ones_like(points_per_crop[:, :, :, 0], dtype=torch.int64) return crop_boxes, points_per_crop, cropped_images, input_labels def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size): """ Generates 2 ** (layers idx + 1) crops for each crop_n_layers. 
Crops are in the XYWH format : The XYWH format consists of the following required indices: - X: X coordinate of the top left of the bounding box - Y: Y coordinate of the top left of the bounding box - W: width of the bounding box - H: height of the bounding box """ crop_boxes, layer_idxs = [], [] im_height, im_width = original_size short_side = min(im_height, im_width) # Original image crop_boxes.append([0, 0, im_width, im_height]) layer_idxs.append(0) for i_layer in range(crop_n_layers): n_crops_per_side = 2 ** (i_layer + 1) overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side)) crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side)) crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)] crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)] for left, top in product(crop_box_x0, crop_box_y0): box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)] crop_boxes.append(box) layer_idxs.append(i_layer + 1) return crop_boxes, layer_idxs def _build_point_grid(n_per_side: int) -> torch.Tensor: """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" offset = 1 / (2 * n_per_side) points_one_side = torch.linspace(offset, 1 - offset, n_per_side) points_x = torch.tile(points_one_side[None, :], (n_per_side, 1)) points_y = torch.tile(points_one_side[:, None], (1, n_per_side)) points = torch.stack([points_x, points_y], dim=-1).reshape(-1, 2) return points def _generate_crop_images( crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None ): """ Takes as an input bounding boxes that are used to crop the image. Based in the crops, the corresponding points are also passed. """ cropped_images = [] total_points_per_crop = [] for i, crop_box in enumerate(crop_boxes): left, top, right, bottom = crop_box cropped_im = image[:, top:bottom, left:right] cropped_images.append(cropped_im) cropped_im_size = cropped_im.shape[-2:] points_scale = torch.tensor(cropped_im_size).flip(dims=(0,)).unsqueeze(0) points = points_grid[layer_idxs[i]] * points_scale normalized_points = _normalize_coordinates(target_size, points, original_size) total_points_per_crop.append(normalized_points) return cropped_images, total_points_per_crop def _normalize_coordinates( target_size: int, coords: torch.Tensor, original_size: tuple[int, int], is_bounding_box=False ) -> torch.Tensor: """ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width) format. 
""" old_height, old_width = original_size scale = target_size * 1.0 / max(old_height, old_width) new_height, new_width = old_height * scale, old_width * scale new_width = int(new_width + 0.5) new_height = int(new_height + 0.5) coords = deepcopy(coords).float() if is_bounding_box: coords = coords.reshape(-1, 2, 2) coords[..., 0] = coords[..., 0] * (new_width / old_width) coords[..., 1] = coords[..., 1] * (new_height / old_height) if is_bounding_box: coords = coords.reshape(-1, 4) return coords def _rle_to_mask(rle: dict[str, Any]) -> torch.Tensor: """Compute a binary mask from an uncompressed RLE.""" height, width = rle["size"] mask = torch.empty(height * width, dtype=bool) idx = 0 parity = False for count in rle["counts"]: mask[idx : idx + count] = parity idx += count parity = not parity mask = mask.reshape(width, height) return mask.transpose(0, 1) # Reshape to original shape def _post_process_for_mask_generation(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7): """ Perform NMS (Non Maximum Suppression) on the outputs. Args: rle_masks (`torch.Tensor`): binary masks in the RLE format iou_scores (`torch.Tensor` of shape (nb_masks, 1)): iou_scores predicted by the model mask_boxes (`torch.Tensor`): The bounding boxes corresponding to segmentation masks amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): NMS threshold. """ keep_by_nms = batched_nms( boxes=mask_boxes.float(), scores=iou_scores, idxs=torch.zeros(mask_boxes.shape[0]), iou_threshold=amg_crops_nms_thresh, ) iou_scores = iou_scores[keep_by_nms] rle_masks = [rle_masks[i] for i in keep_by_nms] mask_boxes = mask_boxes[keep_by_nms] masks = [_rle_to_mask(rle) for rle in rle_masks] return masks, iou_scores, rle_masks, mask_boxes __all__ = ["SamImageProcessorFast"]
transformers/src/transformers/models/sam/image_processing_sam_fast.py/0
{ "file_path": "transformers/src/transformers/models/sam/image_processing_sam_fast.py", "repo_id": "transformers", "token_count": 14562 }
522
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/sam2_video/modular_sam2_video.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_sam2_video.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Optional, Union

import numpy as np
import torch

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType
from ...utils.import_utils import requires
from ...video_utils import VideoInput
from .modeling_sam2_video import Sam2VideoInferenceSession


@requires(backends=("torch",))
class Sam2VideoProcessor(ProcessorMixin):
    r"""
    Constructs a SAM2 video processor which wraps a SAM2 image processor and a 2D points & bounding boxes processor
    into a single processor.

    [`Sam2VideoProcessor`] offers all the functionalities of [`Sam2ImageProcessorFast`] and
    [`Sam2VideoVideoProcessor`]. See the docstring of [`~Sam2ImageProcessorFast.__call__`] and
    [`~Sam2VideoVideoProcessor.__call__`] for more information.

    Args:
        image_processor (`Sam2ImageProcessorFast`):
            An instance of [`Sam2ImageProcessorFast`].
        video_processor (`Sam2VideoVideoProcessor`):
            An instance of [`Sam2VideoVideoProcessor`].
        target_size (`int`, *optional*):
            The target size (target_size, target_size) to which the image will be resized.
        point_pad_value (`int`, *optional*, defaults to -10):
            The value used for padding input points.
    """

    attributes = ["image_processor", "video_processor"]
    image_processor_class = "Sam2ImageProcessorFast"
    video_processor_class = "Sam2VideoVideoProcessor"

    def __init__(
        self, image_processor, video_processor, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs
    ):
        super().__init__(image_processor, video_processor, **kwargs)
        self.point_pad_value = point_pad_value
        self.target_size = target_size if target_size is not None else self.image_processor.size["height"]

    def __call__(
        self,
        images: ImageInput = None,
        segmentation_maps: ImageInput = None,
        input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None,
        input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
        input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
        original_sizes: Optional[Union[list[list[float]], torch.Tensor]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        r"""
        This method uses the [`Sam2ImageProcessorFast.__call__`] method to prepare image(s) for the model. It also
        prepares 2D points and bounding boxes for the model if they are provided.

        Args:
            images (`ImageInput`, *optional*):
                The image(s) to process.
segmentation_maps (`ImageInput`, *optional*): The segmentation maps to process. input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*): The points to add to the frame. input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*): The labels for the points. input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*): The bounding boxes to add to the frame. original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*): The original sizes of the images. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. **kwargs: Additional keyword arguments to pass to the image processor. Returns: A [`BatchEncoding`] with the following fields: - `pixel_values` (`torch.Tensor`): The processed image(s). - `original_sizes` (`list[list[float]]`): The original sizes of the images. - `reshaped_input_sizes` (`torch.Tensor`): The reshaped input sizes of the images. - `labels` (`torch.Tensor`): The processed segmentation maps (if provided). - `input_points` (`torch.Tensor`): The processed points. - `input_labels` (`torch.Tensor`): The processed labels. - `input_boxes` (`torch.Tensor`): The processed bounding boxes. """ if images is not None: encoding_image_processor = self.image_processor( images, segmentation_maps=segmentation_maps, return_tensors=return_tensors, **kwargs, ) elif original_sizes is not None: if isinstance(original_sizes, torch.Tensor): original_sizes = original_sizes.cpu().tolist() encoding_image_processor = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors) else: raise ValueError("Either images or original_sizes must be provided") # pop arguments that are not used in the forward but used nevertheless original_sizes = encoding_image_processor["original_sizes"] # Check original_sizes is of length 1 or len(images) if images is not None and len(original_sizes) != 1 and len(original_sizes) != len(images): raise ValueError( "original_sizes must be of length 1 or len(images). If you are passing a single image, you must pass a single original_size." ) # Process input points, labels, and boxes if provided if input_points is not None or input_labels is not None or input_boxes is not None: # Validate and convert inputs to standardized format processed_points = self._validate_single_input( input_points, expected_depth=4, input_name="points", expected_format="[image level, object level, point level, point coordinates]", expected_coord_size=2, ) processed_labels = self._validate_single_input( input_labels, expected_depth=3, input_name="labels", expected_format="[image level, object level, point level]", ) processed_boxes = self._validate_single_input( input_boxes, expected_depth=3, input_name="boxes", expected_format="[image level, box level, box coordinates]", expected_coord_size=4, ) # Get padding requirements for all inputs if processed_points is not None: points_max_dims = self._get_nested_dimensions(processed_points)[:3] if processed_labels is not None: labels_max_dims = self._get_nested_dimensions(processed_labels)[:3] if processed_boxes is not None: boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2] # Ensure points and labels have consistent dimensions if processed_points is not None and processed_labels is not None: if points_max_dims != labels_max_dims: raise ValueError( "Input points and labels have inconsistent dimensions. Please ensure they have the same dimensions." 
) # Check that boxes don't need padding (model limitation) if processed_boxes is not None and len(processed_boxes) >= 2: if any(len(img_boxes) < boxes_max_dims[1] for img_boxes in processed_boxes): raise ValueError( "Input boxes have inconsistent dimensions that would require padding, " "but boxes cannot be padded due to model limitations. " "Please ensure all images have the same number of boxes." ) # Pad and normalize all inputs to final tensor format if processed_points is not None: padded_points = self._pad_nested_list(processed_points, points_max_dims + [2]) final_points = torch.tensor(padded_points, dtype=torch.float32) self._normalize_tensor_coordinates(final_points, original_sizes, preserve_padding=True) encoding_image_processor.update({"input_points": final_points}) if processed_labels is not None: padded_labels = self._pad_nested_list(processed_labels, labels_max_dims) final_labels = torch.tensor(padded_labels, dtype=torch.int64) encoding_image_processor.update({"input_labels": final_labels}) if processed_boxes is not None: final_boxes = torch.tensor(processed_boxes, dtype=torch.float32) self._normalize_tensor_coordinates(final_boxes, original_sizes, is_bounding_box=True) encoding_image_processor.update({"input_boxes": final_boxes}) return encoding_image_processor def _normalize_coordinates( self, target_size: int, coords: "torch.Tensor", original_size, is_bounding_box=False ) -> "torch.Tensor": """ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format. Args: target_size (`int`): The target size of the image. coords (`torch.Tensor`): The coordinates to be normalized. original_size (`tuple`): The original size of the image. is_bounding_box (`bool`, *optional*, defaults to `False`): Whether the coordinates are bounding boxes. """ old_h, old_w = original_size new_h, new_w = target_size, target_size coords = deepcopy(coords).float() if is_bounding_box: coords = coords.reshape(-1, 2, 2) coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) if is_bounding_box: coords = coords.reshape(-1, 4) return coords def _convert_to_nested_list(self, data, expected_depth, current_depth=0): """ Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists. 
Args: data: Input data in any format expected_depth: Expected nesting depth current_depth: Current depth in recursion Returns: Nested list representation of the data """ if data is None: return None # Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array if isinstance(data, torch.Tensor): # PyTorch tensor if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor return data.numpy().tolist() else: return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data] elif isinstance(data, np.ndarray): # NumPy array if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array return data.tolist() else: return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data] elif isinstance(data, list): if current_depth == expected_depth: # We've reached the expected depth, return as is return data else: # Continue recursion return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data] elif isinstance(data, (int, float)): return data else: raise ValueError(f"Unsupported data type: {type(data)}") def _get_nested_dimensions(self, nested_list, max_dims=None): """ Get the maximum dimensions at each level of nesting. Args: nested_list (`list`): Nested list structure. max_dims (`list`, *optional*): Current maximum dimensions (for recursion). Returns: `list`: A list of maximum dimensions for each nesting level. """ if max_dims is None: max_dims = [] if not isinstance(nested_list, list): return max_dims if len(max_dims) == 0: max_dims.append(len(nested_list)) else: max_dims[0] = max(max_dims[0], len(nested_list)) if len(nested_list) > 0: for item in nested_list: if isinstance(item, list): sub_dims = self._get_nested_dimensions(item) # Merge sub_dims into max_dims for i, dim in enumerate(sub_dims): if i + 1 >= len(max_dims): max_dims.append(dim) else: max_dims[i + 1] = max(max_dims[i + 1], dim) return max_dims def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None): """ Recursively pad a nested list to match target dimensions. Args: nested_list (`list`): Nested list to pad. target_dims (`list`): Target dimensions for each level. current_level (`int`, *optional*, defaults to 0): Current nesting level. pad_value (`int`, *optional*): Value to use for padding. Returns: `list`: The padded nested list. 
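        Example (an illustrative sketch with the default `pad_value` of -10; `self` is the processor instance):

        ```python
        # self._pad_nested_list([[1, 2]], [2, 3])
        # -> [[1, 2, -10], [-10, -10, -10]]
        ```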
""" if pad_value is None: pad_value = self.point_pad_value if current_level >= len(target_dims): return nested_list # Ensure we have a list if not isinstance(nested_list, list): nested_list = [nested_list] # Pad current level current_size = len(nested_list) target_size = target_dims[current_level] # Pad with appropriate values if current_level == len(target_dims) - 1: # At the coordinate level, pad with pad_value nested_list.extend([pad_value] * (target_size - current_size)) else: # At higher levels, pad with nested structures if current_size > 0: # Create appropriately sized template if current_level < len(target_dims) - 2: # For non-coordinate levels, create empty nested structure template_dims = target_dims[current_level + 1 :] template = self._create_empty_nested_structure(template_dims, pad_value) else: # For coordinate level, create list of pad_values template = [pad_value] * target_dims[current_level + 1] nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)]) else: # Create from scratch template_dims = target_dims[current_level + 1 :] template = self._create_empty_nested_structure(template_dims, pad_value) nested_list.extend([deepcopy(template) for _ in range(target_size)]) # Recursively pad sublists if current_level < len(target_dims) - 1: for i in range(len(nested_list)): if isinstance(nested_list[i], list): nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value) return nested_list def _create_empty_nested_structure(self, dims, pad_value): """ Create an empty nested structure with given dimensions filled with pad_value. Args: dims (`list`): The dimensions of the nested structure. pad_value (`int`): The value to fill the structure with. """ if len(dims) == 1: return [pad_value] * dims[0] else: return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])] def _get_nesting_level(self, input_list): """ Get the nesting level of a list structure. Args: input_list (`list`): The list to get the nesting level of. """ if isinstance(input_list, list): if len(input_list) == 0: return 1 return 1 + self._get_nesting_level(input_list[0]) elif isinstance(input_list, (np.ndarray, torch.Tensor)): # For arrays/tensors, the nesting level is the number of dimensions return len(input_list.shape) return 0 def _validate_single_input( self, data: Union[torch.Tensor, np.ndarray, list], expected_depth: int, input_name: str, expected_format: str, expected_coord_size: Optional[int] = None, ) -> list: """ Validate a single input by ensuring proper nesting and raising an error if the input is not valid. Args: data (`torch.Tensor`, `np.ndarray`, or `list`): Input data to process. expected_depth (`int`): Expected nesting depth. input_name (`str`): Name of the input for error messages. expected_format (`str`): The expected format of the input. expected_coord_size (`int`, *optional*): Expected coordinate size (2 for points, 4 for boxes, None for labels). . """ if data is None: return None # Handle tensors and numpy arrays first if isinstance(data, (torch.Tensor, np.ndarray)): # For tensors/arrays, we can directly check the number of dimensions if data.ndim != expected_depth: raise ValueError( f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions." 
) elif expected_coord_size is not None: if data.shape[-1] != expected_coord_size: raise ValueError( f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}." ) return self._convert_to_nested_list(data, expected_depth) # Handle nested lists if isinstance(data, list): current_depth = self._get_nesting_level(data) if current_depth != expected_depth: raise ValueError( f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels." ) return self._convert_to_nested_list(data, expected_depth) def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False): """ Helper method to normalize coordinates in a tensor across multiple images. Args: tensor (`torch.Tensor`): Input tensor with coordinates. original_sizes (`list`): Original image sizes. is_bounding_box (`bool`, *optional*, defaults to `False`): Whether coordinates are bounding boxes. preserve_padding (`bool`, *optional*, defaults to `False`): Whether to preserve padding values (for points). """ if preserve_padding: # For points: avoid normalizing pad values mask = tensor != self.point_pad_value coord_mask = mask.all(dim=-1, keepdim=True) for img_idx in range(len(original_sizes)): if img_idx < tensor.shape[0]: original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0] normalized_coords = self._normalize_coordinates( self.target_size, tensor[img_idx], original_size, is_bounding_box=is_bounding_box ) if preserve_padding: # Only update non-padded values img_mask = coord_mask[img_idx] tensor[img_idx] = torch.where( img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx] ) else: tensor[img_idx] = normalized_coords def post_process_masks( self, masks, original_sizes, mask_threshold=0.0, binarize=True, max_hole_area=0.0, max_sprinkle_area=0.0, apply_non_overlapping_constraints=False, **kwargs, ): """ Remove padding and upscale masks to the original image size. Args: masks (`Union[List[torch.Tensor], List[np.ndarray]]`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. mask_threshold (`float`, *optional*, defaults to 0.0): Threshold for binarization and post-processing operations. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. max_hole_area (`float`, *optional*, defaults to 0.0): The maximum area of a hole to fill. max_sprinkle_area (`float`, *optional*, defaults to 0.0): The maximum area of a sprinkle to fill. apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`): Whether to apply non-overlapping constraints to the masks. Returns: (`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. 
""" return self.image_processor.post_process_masks( masks, original_sizes, mask_threshold, binarize, max_hole_area, max_sprinkle_area, apply_non_overlapping_constraints, **kwargs, ) def init_video_session( self, video: Optional[VideoInput] = None, inference_device: Union[str, "torch.device"] = "cpu", inference_state_device: Union[str, "torch.device"] = None, processing_device: Union[str, "torch.device"] = None, video_storage_device: Union[str, "torch.device"] = None, max_vision_features_cache_size: int = 1, dtype: torch.dtype = torch.float32, ): """ Initializes a video session for inference. If a video is provided (async inference), the video will be processed and stored on the `video_storage_device`. Args: video (`VideoInput`, *optional*): The video to process. No need to provide when streaming. inference_device (`str` or `torch.device`, *optional*, defaults to "cpu"): The device to use for inference. inference_state_device (`str` or `torch.device`, *optional*): The device to store the inference state on. processing_device (`str` or `torch.device`, *optional*): The device to use for video processing. video_storage_device (`str` or `torch.device`, *optional*): The device to store the processed video frames on. max_vision_features_cache_size (`int`, *optional*, defaults to 1): The maximum number of vision features to cache. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The torch dtype to use for the whole session. """ video_storage_device = video_storage_device if video_storage_device is not None else inference_device inference_state_device = inference_state_device if inference_state_device is not None else inference_device processing_device = processing_device if processing_device is not None else inference_device pixel_values_video = None video_height = None video_width = None if video is not None: processed_video = self.video_processor(videos=video, device=processing_device, return_tensors="pt") pixel_values_video = processed_video.pixel_values_videos[0] video_height = processed_video.original_sizes[0][0] video_width = processed_video.original_sizes[0][1] inference_session = Sam2VideoInferenceSession( video=pixel_values_video, video_height=video_height, video_width=video_width, inference_device=inference_device, video_storage_device=video_storage_device, inference_state_device=inference_state_device, dtype=dtype, max_vision_features_cache_size=max_vision_features_cache_size, ) return inference_session def add_inputs_to_inference_session( self, inference_session: Sam2VideoInferenceSession, frame_idx: int, obj_ids: Union[list[int], int], input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None, input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None, input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None, input_masks: Optional[Union[np.ndarray, torch.Tensor, list[np.ndarray], list[torch.Tensor]]] = None, original_size: Optional[tuple[int, int]] = None, clear_old_inputs: bool = True, ) -> Sam2VideoInferenceSession: """ Process new points, boxes, or masks for a video frame and add them to the inference session. Args: inference_session (`Sam2VideoInferenceSession`): The inference session for the video. frame_idx (`int`): The index of the frame to process. obj_ids (`list[int]` or `int`): The object ID(s) to associate with the points or box. These can be any integers and can be reused later on to specify an object. 
input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*): The points to add to the frame. input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*): The labels for the points. input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*): The bounding boxes to add to the frame. input_masks (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, or `list[torch.Tensor]`, *optional*): The mask(s) to add to the frame. original_size (`tuple[int, int]`, *optional*): The original size of the video. Provide when streaming. clear_old_inputs (`bool`, *optional*, defaults to `True`): Whether to clear old inputs for the object. """ if isinstance(obj_ids, int): obj_ids = [obj_ids] # Validate inputs if (input_points is not None) != (input_labels is not None): raise ValueError("points and labels must be provided together") if input_points is None and input_boxes is None and input_masks is None: raise ValueError("at least one of points, boxes, or masks must be provided as input") if input_masks is not None and (input_points is not None or input_boxes is not None): raise ValueError("masks cannot be provided together with points or boxes") if input_masks is not None: return self.process_new_mask_for_video_frame(inference_session, frame_idx, obj_ids, input_masks) else: return self.process_new_points_or_boxes_for_video_frame( inference_session, frame_idx, obj_ids, input_points, input_labels, input_boxes, original_size, clear_old_inputs, ) def process_new_points_or_boxes_for_video_frame( self, inference_session: Sam2VideoInferenceSession, frame_idx: int, obj_ids: list[int], input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None, input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None, input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None, original_size: Optional[tuple[int, int]] = None, clear_old_inputs: bool = True, ) -> Sam2VideoInferenceSession: """ Process new points or boxes for a video frame and add them to the inference session. Args: inference_session (`Sam2VideoInferenceSession`): The inference session for the video. frame_idx (`int`): The index of the frame to process. obj_ids (`list[int]`): The object ID(s) to associate with the points or box. These can be any integers and can be reused later on to specify an object. input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*): The points to add to the frame. input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*): The labels for the points. input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*): The bounding boxes to add to the frame. original_size (`tuple[int, int]`, *optional*): The original size of the video. Provide when streaming. clear_old_inputs (`bool`, *optional*, defaults to `True`): Whether to clear old inputs for the object. 
""" if original_size is not None: inference_session.video_height = original_size[0] inference_session.video_width = original_size[1] elif inference_session.video_height is None or inference_session.video_width is None: raise ValueError("original_size must be provided when adding points or boxes on a first streamed frame") original_sizes = [[inference_session.video_height, inference_session.video_width]] encoded_inputs = self( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, original_sizes=original_sizes, return_tensors="pt", ) input_points = encoded_inputs.get("input_points", None) input_labels = encoded_inputs.get("input_labels", None) input_boxes = encoded_inputs.get("input_boxes", None) if input_points is not None: if input_points.shape[1] != len(obj_ids): raise ValueError( f"Number of object ids ({len(obj_ids)}) does not match number of points ({input_points.shape[1]})" ) else: input_points = torch.zeros(1, len(obj_ids), 0, 2, dtype=torch.float32) if input_labels is not None: if input_labels.shape[1] != len(obj_ids): raise ValueError( f"Number of object ids ({len(obj_ids)}) does not match number of labels ({input_labels.shape[1]})" ) else: input_labels = torch.zeros(1, len(obj_ids), 0, dtype=torch.int32) if input_boxes is not None: if input_boxes.shape[1] != len(obj_ids): raise ValueError( f"Number of object ids ({len(obj_ids)}) does not match number of boxes ({input_boxes.shape[1]})" ) if input_boxes is not None: if not clear_old_inputs: raise ValueError( "cannot add box without clearing old points, since " "box prompt must be provided before any point prompt " "(please use clear_old_points=True instead)" ) box_coords = input_boxes.reshape(1, -1, 2, 2) box_labels = torch.tensor([2, 3], dtype=torch.int32) box_labels = box_labels.reshape(1, -1, 2) input_points = torch.cat([box_coords, input_points], dim=2) input_labels = torch.cat([box_labels, input_labels], dim=2) for obj_id, idx in zip(obj_ids, range(len(obj_ids))): obj_idx = inference_session.obj_id_to_idx(obj_id) input_points_for_obj = input_points[:, idx, :, :].unsqueeze(1) input_labels_for_obj = input_labels[:, idx, :].unsqueeze(1) # Handle existing points if not clear_old_inputs: existing_points = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None) if existing_points is not None: # Concatenate with existing points input_points_for_obj = torch.cat( [existing_points["point_coords"].to(input_points_for_obj.device), input_points_for_obj], dim=2 ) input_labels_for_obj = torch.cat( [existing_points["point_labels"].to(input_labels_for_obj.device), input_labels_for_obj], dim=2 ) point_inputs = { "point_coords": input_points_for_obj, "point_labels": input_labels_for_obj, } inference_session.add_point_inputs(obj_idx, frame_idx, point_inputs) inference_session.remove_mask_inputs(obj_idx, frame_idx) # Clear any mask inputs inference_session.obj_with_new_inputs = obj_ids def process_new_mask_for_video_frame( self, inference_session: Sam2VideoInferenceSession, frame_idx: int, obj_ids: list[int], input_masks: Union[np.ndarray, torch.Tensor, list[np.ndarray], list[torch.Tensor]], ): """ Add new mask to a frame and add them to the inference session. Args: inference_session (`Sam2VideoInferenceSession`): The inference session for the video. frame_idx (`int`): The index of the frame to process. obj_ids (`list[int]`): The object ID(s) to associate with the mask. These can be any integers and can be reused later on to specify an object. 
            input_masks (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, or `list[torch.Tensor]`):
                The mask(s) to add to the frame.
        """
        if not isinstance(input_masks, list):
            input_masks = [input_masks]
        if len(input_masks) != len(obj_ids):
            raise ValueError(
                f"Number of object ids ({len(obj_ids)}) does not match number of masks ({len(input_masks)})"
            )

        for obj_id, mask in zip(obj_ids, input_masks):
            obj_idx = inference_session.obj_id_to_idx(obj_id)
            device = inference_session.inference_device

            # Process mask
            if not isinstance(mask, torch.Tensor):
                mask = torch.tensor(mask, dtype=torch.bool)
            nb_dim = mask.dim()
            if nb_dim > 4 or nb_dim < 2:
                raise ValueError(f"Mask has an unsupported number of dimensions: {nb_dim}")
            # Expand to 4D (batch, channel, height, width)
            for _ in range(4 - nb_dim):
                mask = mask.unsqueeze(0)
            mask_H, mask_W = mask.shape[-2:]
            mask_inputs_orig = mask.float().to(device)

            # Resize mask if needed
            if mask_H != self.target_size or mask_W != self.target_size:
                mask_inputs = torch.nn.functional.interpolate(
                    mask_inputs_orig,
                    size=(self.target_size, self.target_size),
                    align_corners=False,
                    mode="bilinear",
                    antialias=True,
                )
                mask_inputs = (mask_inputs >= 0.5).float()
            else:
                mask_inputs = mask_inputs_orig

            inference_session.add_mask_inputs(obj_idx, frame_idx, mask_inputs)
            inference_session.remove_point_inputs(obj_idx, frame_idx)  # Clear any point inputs

        inference_session.obj_with_new_inputs = obj_ids


__all__ = ["Sam2VideoProcessor"]
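To tie the pieces above together, here is a minimal usage sketch of the video prompting API. The checkpoint id is an illustrative assumption, not a confirmed repository name; the point nesting follows the `(batch, objects, points, xy)` convention enforced by `_validate_single_input`.

```python
import torch
from transformers import Sam2VideoProcessor

# NOTE: checkpoint id is a placeholder assumption; substitute a real SAM2 video checkpoint.
processor = Sam2VideoProcessor.from_pretrained("facebook/sam2.1-hiera-tiny")

# Four dummy RGB frames in (num_channels, height, width) uint8 format.
video = [torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8) for _ in range(4)]

session = processor.init_video_session(video=video, inference_device="cpu")

# One foreground click (label 1) for object id 1 on frame 0.
# input_points nesting: (batch, objects, points, xy); input_labels: (batch, objects, points).
processor.add_inputs_to_inference_session(
    session,
    frame_idx=0,
    obj_ids=1,
    input_points=[[[[210.0, 350.0]]]],
    input_labels=[[[1]]],
)
```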
transformers/src/transformers/models/sam2_video/processing_sam2_video.py/0
{ "file_path": "transformers/src/transformers/models/sam2_video/processing_sam2_video.py", "repo_id": "transformers", "token_count": 17679 }
523
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for Segformer.""" from typing import Optional, Union from transformers.models.beit.image_processing_beit_fast import BeitFastImageProcessorKwargs, BeitImageProcessorFast from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( group_images_by_shape, reorder_images, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, ) from ...processing_utils import Unpack from ...utils import ( TensorType, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F elif is_torchvision_available(): from torchvision.transforms import functional as F class SegformerFastImageProcessorKwargs(BeitFastImageProcessorKwargs): pass class SegformerImageProcessorFast(BeitImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD size = {"height": 512, "width": 512} do_resize = True do_rescale = True rescale_factor = 1 / 255 do_normalize = True do_reduce_labels = False do_center_crop = None crop_size = None def _preprocess_image_like_inputs( self, images: ImageInput, segmentation_maps: Optional[ImageInput], do_convert_rgb: bool, input_data_format: ChannelDimension, device: Optional[Union[str, "torch.device"]] = None, **kwargs: Unpack[SegformerFastImageProcessorKwargs], ) -> BatchFeature: """ Preprocess image-like inputs. """ images = self._prepare_image_like_inputs( images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device ) images_kwargs = kwargs.copy() images_kwargs["do_reduce_labels"] = False batch_feature = self._preprocess(images, **images_kwargs) if segmentation_maps is not None: processed_segmentation_maps = self._prepare_image_like_inputs( images=segmentation_maps, expected_ndims=2, do_convert_rgb=False, input_data_format=ChannelDimension.FIRST, ) segmentation_maps_kwargs = kwargs.copy() segmentation_maps_kwargs.update( { "do_normalize": False, "do_rescale": False, # Nearest interpolation is used for segmentation maps instead of BILINEAR. 
"interpolation": F.InterpolationMode.NEAREST_EXACT if is_torchvision_v2_available() else F.InterpolationMode.NEAREST, } ) processed_segmentation_maps = self._preprocess( images=processed_segmentation_maps, **segmentation_maps_kwargs ).pixel_values batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64) return batch_feature def _preprocess( self, images: list["torch.Tensor"], do_reduce_labels: bool, interpolation: Optional["F.InterpolationMode"], do_resize: bool, do_rescale: bool, do_normalize: bool, size: SizeDict, rescale_factor: float, image_mean: Union[float, list[float]], image_std: Union[float, list[float]], disable_grouping: bool, return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: # Return type can be list if return_tensors=None if do_reduce_labels: images = self.reduce_label(images) # Apply reduction if needed # Group images by size for batched resizing resized_images = images if do_resize: grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): resized_stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = resized_stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing (rescale/normalize) # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) # Stack images into a single tensor if return_tensors is set processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["SegformerImageProcessorFast"]
transformers/src/transformers/models/segformer/modular_segformer.py/0
{ "file_path": "transformers/src/transformers/models/segformer/modular_segformer.py", "repo_id": "transformers", "token_count": 2538 }
524
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for SigLIP2.""" import math from functools import lru_cache from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import ( convert_to_rgb, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): from PIL import Image @lru_cache(maxsize=256) def get_image_size_for_max_num_patches( image_height: int, image_width: int, patch_size: int, max_num_patches: int, eps: float = 1e-5 ) -> tuple[int, int]: """ Determine image size based on max number of patches, ensure dimensions are divisible by patch size and image is at least 1 patch. Args: image_height (`int`): Original image height. image_width (`int`): Original image width. patch_size (`int`): Patch size for processing. max_num_patches (`int`): Maximum number of patches. eps (`float`): Small threshold for binary search. Returns: Tuple: (target_height, target_width) """ def get_scaled_image_size(scale: float, size: int, patch_size: int) -> int: scaled_size = size * scale scaled_size = math.ceil(scaled_size / patch_size) * patch_size # make divisible by patch_size scaled_size = max(patch_size, scaled_size) # ensure at least 1 patch return int(scaled_size) # Binary search for optimal scale scale_min, scale_max = eps / 10, 100.0 while (scale_max - scale_min) >= eps: scale = (scale_min + scale_max) / 2 target_height = get_scaled_image_size(scale, image_height, patch_size) target_width = get_scaled_image_size(scale, image_width, patch_size) num_patches = (target_height / patch_size) * (target_width / patch_size) if num_patches <= max_num_patches: scale_min = scale else: scale_max = scale scale = scale_min target_height = get_scaled_image_size(scale, image_height, patch_size) target_width = get_scaled_image_size(scale, image_width, patch_size) return target_height, target_width def convert_image_to_patches(image: np.ndarray, patch_size: int) -> np.ndarray: """ Convert 3D array image of shape (image_height, image_width, num_channels) into 2D array of patches of shape (num_patches_height * num_patches_width, patch_size * patch_size * num_channels). 
""" image_height, image_width, num_channels = image.shape num_patches_height = image_height // patch_size num_patches_width = image_width // patch_size patched_image = image.reshape(num_patches_height, patch_size, num_patches_width, patch_size, num_channels) patched_image = patched_image.transpose(0, 2, 1, 3, 4) patched_image = patched_image.reshape(num_patches_height * num_patches_width, -1) return patched_image def pad_along_first_dim(array: np.ndarray, target_length: int, pad_value: int = 0) -> tuple[np.ndarray, np.ndarray]: """ Pad the array along the first dimension. """ current_length = array.shape[0] padding_length = target_length - current_length mask = np.ones((target_length,), dtype=np.int32) if padding_length > 0: paddings = [(0, padding_length)] + [(0, 0)] * (array.ndim - 1) array = np.pad(array, paddings, mode="constant", constant_values=pad_value) mask[-padding_length:] = 0 return array, mask class Siglip2ImageProcessor(BaseImageProcessor): r""" Constructs a SigLIP2 image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's dimensions to fit `max_num_patches` according to given `patch_size`. Can be overridden by `do_resize` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image by the specified mean and standard deviation. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch the image will be split to. max_num_patches (`int`, *optional*, defaults to 256): The image will be resized to have at most this number of patches, and then padded in "patch" dimension to match this number exactly. 
""" model_input_names = ["pixel_values", "pixel_attention_mask", "spatial_shapes"] def __init__( self, do_resize: bool = True, resample: "PILImageResampling" = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, patch_size: int = 16, max_num_patches: int = 256, **kwargs, ): super().__init__(**kwargs) image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5] image_std = image_std if image_std is not None else [0.5, 0.5, 0.5] self.do_resize = do_resize self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.patch_size = patch_size self.max_num_patches = max_num_patches @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, resample: Optional["PILImageResampling"] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, do_convert_rgb: Optional[bool] = None, patch_size: Optional[int] = None, max_num_patches: Optional[int] = None, ) -> "Image.Image": """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. 
If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. patch_size (`int`, *optional*, defaults to `self.patch_size`): Patch size for processing, same as the patch size used in the model. max_num_patches (`int`, *optional*, defaults to `self.max_num_patches`): Maximum number of patches per image, the image will be resized to have at most this number of patches. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb patch_size = patch_size if patch_size is not None else self.patch_size max_num_patches = max_num_patches if max_num_patches is not None else self.max_num_patches # Explicitly specify data format to be channels last for image preprocessing. # Image processor does not support different output formats, because it returns patches. data_format = ChannelDimension.LAST images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) pixel_masks = [] pixel_values = [] spatial_shapes = [] for image in images: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) if do_resize: height, width = get_image_size_for_max_num_patches( image_height=image.shape[0], image_width=image.shape[1], patch_size=patch_size, max_num_patches=max_num_patches, ) image = resize(image=image, size=(height, width), resample=resample, input_data_format=data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=data_format) patches = convert_image_to_patches(image, patch_size) patches, mask = pad_along_first_dim(patches, max_num_patches) num_patches_height = image.shape[0] // patch_size num_patches_width = image.shape[1] // patch_size spatial_shapes.append((num_patches_height, num_patches_width)) pixel_values.append(patches) pixel_masks.append(mask) batch_feature = BatchFeature( data={ "pixel_values": pixel_values, "pixel_attention_mask": pixel_masks, "spatial_shapes": spatial_shapes, }, tensor_type=return_tensors, ) return batch_feature __all__ = ["Siglip2ImageProcessor"]
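To make the patch bookkeeping concrete, a small self-contained sketch using only this file's public interface; the exact resized patch grid is an example, not a guarantee.

```python
import numpy as np
from transformers import Siglip2ImageProcessor

processor = Siglip2ImageProcessor()  # patch_size=16, max_num_patches=256 by default

# A non-square dummy image in (height, width, num_channels) format.
image = np.random.randint(0, 256, (300, 180, 3), dtype=np.uint8)

out = processor(images=image, return_tensors="np")
print(out.pixel_values.shape)          # expected: (1, 256, 768), i.e. 16 * 16 * 3 values per flattened patch
print(out.spatial_shapes)              # e.g. [[20 12]] -> 240 real patches for a 5:3 aspect ratio
print(out.pixel_attention_mask.sum())  # number of real (non-padded) patch rows
```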
transformers/src/transformers/models/siglip2/image_processing_siglip2.py/0
{ "file_path": "transformers/src/transformers/models/siglip2/image_processing_siglip2.py", "repo_id": "transformers", "token_count": 6680 }
525
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Union

import numpy as np

from ...image_processing_utils import (
    BatchFeature,
    get_size_dict,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    SizeDict,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import (
    TensorType,
    is_torch_available,
    is_torchvision_available,
    is_torchvision_v2_available,
    is_vision_available,
)
from ...utils.import_utils import requires
from ...video_processing_utils import (
    BaseVideoProcessor,
)
from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos


if is_vision_available():
    from ...image_utils import PILImageResampling

if is_torchvision_available():
    if is_torchvision_v2_available():
        from torchvision.transforms.v2 import functional as F
    else:
        from torchvision.transforms import functional as F

if is_torch_available():
    import torch

from ...utils import logging


logger = logging.get_logger(__name__)


DEFAULT_SYSTEM_MESSAGE = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
DEFAULT_VIDEO_INTRO = (
    "You are provided the following series of {frame_count} frames from a {video_duration} [H:MM:SS] video.\n"
)
DEFAULT_MEDIA_OUTTRO = "\n\n"
FRAME_TIMESTAMP_MESSAGE = "\nFrame from {timestamp}:"

MAX_IMAGE_SIZE = 4096  # 4k resolution as absolute maximum


def get_max_height_width(videos: list["torch.Tensor"]) -> tuple[int, int]:
    """
    Get the maximum height and width across all videos in a batch.
    """
    max_height = max_width = float("-inf")
    for video in videos:
        height, width = video.size()[-2:]
        max_height = max(height, max_height)
        max_width = max(width, max_width)
    return (max_height, max_width)


def get_resize_output_image_size(
    video,
    resolution_max_side: int,
) -> tuple[int, int]:
    """
    Get the output size of the video after resizing, given the maximum allowed length of the longest edge.

    Args:
        video (`np.ndarray`):
            Video to resize.
        resolution_max_side (`int`):
            The longest edge of the video will be resized to this value. The shortest edge will be resized to keep the
            input aspect ratio.

    Returns:
        The output size of the video after resizing.
""" height, width = video.size()[-2:] # Find the output size, when rescaling the longest edge to max_len and preserving the aspect ratio # The output size must be below the MAX_IMAGE_SIZE resolution_max_side = min(MAX_IMAGE_SIZE, resolution_max_side) resolution_max_side = max(height, width) if resolution_max_side is None else resolution_max_side aspect_ratio = width / height if width >= height: width = resolution_max_side height = int(width / aspect_ratio) if height % 2 != 0: height += 1 elif height > width: height = resolution_max_side width = int(height * aspect_ratio) if width % 2 != 0: width += 1 height = max(height, 1) width = max(width, 1) return height, width class SmolVLMVideoProcessorInitKwargs(VideosKwargs): max_image_size: dict[str, int] = None @requires(backends=("torchvision",)) class SmolVLMVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.LANCZOS size = {"longest_edge": 4 * 364} max_image_size = {"longest_edge": 364} image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True do_sample_frames = False # Set to False for BC, recommended to set `True` in new models valid_kwargs = SmolVLMVideoProcessorInitKwargs model_input_names = ["pixel_values", "pixel_attention_mask"] def __init__(self, **kwargs: Unpack[SmolVLMVideoProcessorInitKwargs]): super().__init__(**kwargs) # For BC pop values from `config.video_sampling`. In official config `video_sampling` is guaranteed to be present # We check for `Noneness` only for certain tests such as `test_init_without_params` if "size" in kwargs and "video_sampling" in kwargs: kwargs["video_sampling"]["video_size"] = kwargs["size"] if "video_sampling" in kwargs: self.num_frames = kwargs["video_sampling"]["max_frames"] self.fps = kwargs["video_sampling"]["fps"] self.size = get_size_dict(kwargs["video_sampling"]["video_size"], default_to_square=self.default_to_square) def resize( self, video: "torch.Tensor", size: SizeDict, interpolation: "F.InterpolationMode" = None, antialias: bool = True, **kwargs, ) -> "torch.Tensor": """ Resize an video to `(size["height"], size["width"])`. Args: video (`torch.Tensor`): Video to resize. size (`SizeDict`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output video. resample (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): `InterpolationMode` filter to use when resizing the video e.g. `InterpolationMode.BICUBIC`. Returns: `torch.Tensor`: The resized video. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR if interpolation == F.InterpolationMode.LANCZOS: logger.warning_once( "You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. " "BICUBIC resample will be used as an alternative. Please fall back to image processor if you " "want full consistency with the original model." ) interpolation = F.InterpolationMode.BICUBIC if size.longest_edge: # Resize the image so that the shortest edge or the longest edge is of the given size # while maintaining the aspect ratio of the original image. new_size = get_resize_output_image_size( video, resolution_max_side=size.longest_edge, ) elif size.height and size.width: new_size = (size.height, size.width) else: raise ValueError(f"Size must contain 'height' and 'width' keys, or 'longest_edge' key. 
Got {size}.") video = F.resize(video, new_size, interpolation=interpolation, antialias=antialias) # Resize again to match image processor when `do_image_splitting=False`. Frames have to be squared to `max_image_size` # NOTE: videos are always processoed without image splitting max_size = self.max_image_size["longest_edge"], self.max_image_size["longest_edge"] video = F.resize(video, max_size, interpolation=interpolation, antialias=antialias) return video def pad( self, video: "torch.Tensor", padded_size: tuple[int, int], max_num_frames: int, fill: int = 0, return_pixel_mask: bool = True, ): """Pads the sample with empty video to the padded_size Args: video (`torch.Tensor`): Video to pad. padded_size (`tuple[int, int]`): Height and width to pad. max_num_frames (`int`): The maximum number of frames to which video will be padded. fill (`int`, *optional*): The value to use for the padding. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. """ original_size = video.size()[-2:] padding_height = padded_size[0] - original_size[0] padding_width = padded_size[1] - original_size[1] padding_frame = max_num_frames - video.shape[0] if padding_width < 0 or padding_height < 0: raise ValueError( f"Padding dimensions are negative. Please make sure that the padded size is larger than the " f"original size. Got padded size: {padded_size}, original size: {original_size}." ) if original_size != padded_size: padding = [0, padding_width, 0, padding_height, 0, 0, 0, padding_frame] video = F.pad(video, padding, fill=fill) # Make a pixel mask for the video, where 1 indicates a valid pixel and 0 indicates padding. # Mask shape is (num_frames, height, width) so we omit the channel dim pixel_mask = None if return_pixel_mask: pixel_mask = torch.zeros_like(video[..., 0, :, :], dtype=torch.int64) pixel_mask[..., : original_size[0], : original_size[1]] = 1 return video, pixel_mask def sample_frames( self, video: "torch.Tensor", metadata: Union[VideoMetadata, dict], num_frames: Optional[int] = None, fps: Optional[Union[int, float]] = None, skip_secs: Optional[int] = 1, ): """ Video sampling function which: - Uses `num_frames` (if provided) or calculates it from `fps` and metadata. - Applies a basic center-skip if fewer frames than available, otherwise optionally skips `skip_secs` from both the start and end. - Uniformly samples the desired number of frames between the start and end indices. Args: video (`torch.Tensor`): Video that need to be sampled. metadata (`VideoMetadata`): Metadata of the video containing information about total duration, fps and total number of frames. num_frames (`int`, *optional*): Maximum number of frames to sample. Defaults to `self.num_frames`. fps (`int` or `float`, *optional*): Target frames to sample per second. Defaults to `self.fps`. skip_secs (`float`, *optional*, defaults to `1`): Number of seconds to skip from the start and end if the video is long enough. Returns: torch.Tensor: Sampled video frames. 
""" num_frames = num_frames if num_frames is not None else self.num_frames fps = fps if fps is not None else self.fps total_num_frames = video.shape[0] # Step 1) Estimate how many frames we'd sample at `target_fps`, fallback if target_fps <= 0 estimated_frames = int(round(fps * metadata["duration"])) # Step 2) desired_frames desired_frames = min(estimated_frames, num_frames) if desired_frames < 1: desired_frames = 1 # Step 3) center skip logic start_idx = 0 end_idx = total_num_frames - 1 if skip_secs > 0 and (metadata["duration"] - 2 * skip_secs) > (num_frames * fps): start_idx = int(skip_secs * metadata["fps"]) end_idx = int(total_num_frames - skip_secs * metadata["fps"]) start_idx = max(0, start_idx) end_idx = min(end_idx, total_num_frames - 1) if start_idx >= end_idx: start_idx, end_idx = 0, total_num_frames - 1 indices = np.linspace(start_idx, end_idx, desired_frames, dtype=int) indices = np.unique(indices) video = video[indices].contiguous() timestamps = [] for idx in indices: sec = idx / metadata["fps"] mm = int(sec // 60) ss = int(sec % 60) timestamps.append([mm, ss]) return video, timestamps, int(metadata["duration"]) def _preprocess( self, videos: list["torch.Tensor"], video_metadata: Union[list[VideoMetadata], list[dict]], do_convert_rgb: bool, do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, do_pad: bool, do_sample_frames: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], fps: Optional[Union[int, float]] = None, num_frames: Optional[int] = None, skip_secs: Optional[int] = 0, return_tensors: Optional[Union[str, TensorType]] = None, device: Optional["torch.Tensor"] = None, **kwargs, ): # Group videos by size for batched resizing if do_sample_frames: if video_metadata[0] is None: raise ValueError( "Frame sampling is enabled but no video metadata was found. SmolVLM requires metadata to correctly sample frames. " "Please pass in `VideoMetadata` object per each input video or set `do_sample_frames=False`" ) processed_videos = [] timestamps_list, durations_list = [], [] for video, metadata in zip(videos, video_metadata): video, timestamps, duration = self.sample_frames(video, metadata, num_frames, fps, skip_secs) timestamps_list.append(timestamps) durations_list.append(duration) processed_videos.append(video) else: # Assume 24 fps by default and prepare timestamps for the whole video when all frames are sampled processed_videos = videos timestamps_list = [ [(int((idx / 24) // 60), int((idx / 24) % 60)) for idx in range(len(video))] for video in videos ] durations_list = [len(video) // 24 for video in videos] # We need to sample frames first before moving to device, if `do_sample_frames=True`. 
        # Otherwise moving the whole video incurs high GPU mem usage for long videos
        if device is not None:
            # Move the (possibly subsampled) frames, not the raw input videos
            processed_videos = [video.to(device) for video in processed_videos]

        grouped_videos, grouped_videos_index = group_videos_by_shape(processed_videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(stacked_videos, size=size, interpolation=interpolation)
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)

        if do_pad:
            pad_size = get_max_height_width(processed_videos)
            max_num_frames = max(len(video) for video in processed_videos)
            grouped_videos, grouped_videos_index = group_videos_by_shape(processed_videos)
            processed_padded_mask_grouped = {}
            processed_videos_grouped = {}

            for shape, stacked_videos in grouped_videos.items():
                stacked_videos, padded_masks = self.pad(
                    stacked_videos, padded_size=pad_size, max_num_frames=max_num_frames
                )
                processed_videos_grouped[shape] = stacked_videos
                processed_padded_mask_grouped[shape] = padded_masks

            processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
            pixel_attention_mask = reorder_videos(processed_padded_mask_grouped, grouped_videos_index)

        processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos
        data = {"pixel_values": processed_videos, "timestamps": timestamps_list, "durations": durations_list}

        if do_pad:
            data["pixel_attention_mask"] = (
                torch.stack(pixel_attention_mask, dim=0)
                if do_pad and return_tensors is not None
                else pixel_attention_mask
            )

        return BatchFeature(data, tensor_type=return_tensors)


__all__ = ["SmolVLMVideoProcessor"]
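A brief sketch on a dummy clip; the output shapes follow from the double resize in `resize` (longest edge `4 * 364`, then a square `max_image_size`) and are stated as expectations, not guarantees.

```python
import torch
from transformers.models.smolvlm.video_processing_smolvlm import SmolVLMVideoProcessor

processor = SmolVLMVideoProcessor()

# One dummy clip: 8 frames of (num_channels, height, width). `do_sample_frames` defaults to
# False, so every frame is kept and timestamps assume a nominal 24 fps.
video = torch.randint(0, 256, (8, 3, 240, 320), dtype=torch.uint8)

out = processor(videos=video, return_tensors="pt")
print(out.pixel_values.shape)          # expected: torch.Size([1, 8, 3, 364, 364])
print(out.pixel_attention_mask.shape)  # expected: torch.Size([1, 8, 364, 364])
print(out.timestamps)                  # per-frame [mm, ss] timestamps
```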
transformers/src/transformers/models/smolvlm/video_processing_smolvlm.py/0
{ "file_path": "transformers/src/transformers/models/smolvlm/video_processing_smolvlm.py", "repo_id": "transformers", "token_count": 7190 }
526
# coding=utf-8 # Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SpeechT5 model configuration""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class SpeechT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SpeechT5Model`]. It is used to instantiate a SpeechT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SpeechT5 [microsoft/speecht5_asr](https://huggingface.co/microsoft/speecht5_asr) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 81): Vocabulary size of the SpeechT5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method of [`SpeechT5Model`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. encoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. encoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. encoder_layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer decoder. decoder_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder. decoder_layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. positional_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the text position encoding layers. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. 
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by multiplying them by sqrt(d_model).
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in the speech encoder pre-net. One of `"group"` for
            group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all
            1D convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the speech encoder pre-net.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            speech encoder pre-net. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the speech encoder pre-net. The
            length of *conv_stride* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the speech encoder pre-net.
            The length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of the 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of the 1D convolutional positional embeddings layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the speech encoder pre-net. For
            reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://huggingface.co/papers/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease
            the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespective of `mask_time_prob`. Only relevant if
            `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks
            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
            overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespective of `mask_feature_prob`. Only relevant if
            `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
        num_mel_bins (`int`, *optional*, defaults to 80):
            Number of mel features used per input features. Used by the speech decoder pre-net. Should correspond to
            the value used in the [`SpeechT5Processor`] class.
        speech_decoder_prenet_layers (`int`, *optional*, defaults to 2):
            Number of layers in the speech decoder pre-net.
        speech_decoder_prenet_units (`int`, *optional*, defaults to 256):
            Dimensionality of the layers in the speech decoder pre-net.
        speech_decoder_prenet_dropout (`float`, *optional*, defaults to 0.5):
            The dropout probability for the speech decoder pre-net layers.
        speaker_embedding_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
            Number of layers in the speech decoder post-net.
        speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
            Dimensionality of the layers in the speech decoder post-net.
        speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
            Number of convolutional filter channels in the speech decoder post-net.
        speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
            The dropout probability for the speech decoder post-net layers.
        reduction_factor (`int`, *optional*, defaults to 2):
            Spectrogram length reduction factor for the speech decoder inputs.
        max_speech_positions (`int`, *optional*, defaults to 4000):
            The maximum sequence length of speech features that this model might ever be used with.
        max_text_positions (`int`, *optional*, defaults to 450):
            The maximum sequence length of text features that this model might ever be used with.
        encoder_max_relative_position (`int`, *optional*, defaults to 160):
            Maximum distance for relative position embedding in the encoder.
        use_guided_attention_loss (`bool`, *optional*, defaults to `True`):
            Whether to apply guided attention loss while training the TTS model.
        guided_attention_loss_num_heads (`int`, *optional*, defaults to 2):
            Number of attention heads the guided attention loss will be applied to. Use -1 to apply this loss to all
            attention heads.
guided_attention_loss_sigma (`float`, *optional*, defaults to 0.4): Standard deviation for guided attention loss. guided_attention_loss_scale (`float`, *optional*, defaults to 10.0): Scaling coefficient for guided attention loss (also known as lambda). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Example: ```python >>> from transformers import SpeechT5Model, SpeechT5Config >>> # Initializing a "microsoft/speecht5_asr" style configuration >>> configuration = SpeechT5Config() >>> # Initializing a model (with random weights) from the "microsoft/speecht5_asr" style configuration >>> model = SpeechT5Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "speecht5" attribute_map = {"num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers"} def __init__( self, vocab_size=81, hidden_size=768, encoder_layers=12, encoder_attention_heads=12, encoder_ffn_dim=3072, encoder_layerdrop=0.1, decoder_layers=6, decoder_ffn_dim=3072, decoder_attention_heads=12, decoder_layerdrop=0.1, hidden_act="gelu", positional_dropout=0.1, hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-5, scale_embedding=False, feat_extract_norm="group", feat_proj_dropout=0.0, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, decoder_start_token_id=2, num_mel_bins=80, speech_decoder_prenet_layers=2, speech_decoder_prenet_units=256, speech_decoder_prenet_dropout=0.5, speaker_embedding_dim=512, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, speech_decoder_postnet_dropout=0.5, reduction_factor=2, max_speech_positions=4000, max_text_positions=450, encoder_max_relative_position=160, use_guided_attention_loss=True, guided_attention_loss_num_heads=2, guided_attention_loss_sigma=0.4, guided_attention_loss_scale=10.0, use_cache=True, is_encoder_decoder=True, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_layers = encoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.decoder_layerdrop = decoder_layerdrop self.hidden_act = hidden_act self.positional_dropout = positional_dropout self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.scale_embedding = scale_embedding self.feat_extract_norm = feat_extract_norm self.feat_proj_dropout = feat_proj_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = 
num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks self.num_mel_bins = num_mel_bins self.speech_decoder_prenet_layers = speech_decoder_prenet_layers self.speech_decoder_prenet_units = speech_decoder_prenet_units self.speech_decoder_prenet_dropout = speech_decoder_prenet_dropout self.speaker_embedding_dim = speaker_embedding_dim self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout self.reduction_factor = reduction_factor self.max_speech_positions = max_speech_positions self.max_text_positions = max_text_positions self.encoder_max_relative_position = encoder_max_relative_position self.use_guided_attention_loss = use_guided_attention_loss self.guided_attention_loss_num_heads = guided_attention_loss_num_heads self.guided_attention_loss_sigma = guided_attention_loss_sigma self.guided_attention_loss_scale = guided_attention_loss_scale self.use_cache = use_cache self.is_encoder_decoder = is_encoder_decoder super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) class SpeechT5HifiGanConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SpeechT5HifiGan`]. It is used to instantiate a SpeechT5 HiFi-GAN vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SpeechT5 [microsoft/speecht5_hifigan](https://huggingface.co/microsoft/speecht5_hifigan) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_dim (`int`, *optional*, defaults to 80): The number of frequency bins in the input log-mel spectrogram. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the output audio will be generated, expressed in hertz (Hz). upsample_initial_channel (`int`, *optional*, defaults to 512): The number of input channels into the upsampling network.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[4, 4, 4, 4]`): A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The length of *upsample_rates* defines the number of convolutional layers and has to match the length of *upsample_kernel_sizes*. upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 8, 8]`): A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of *upsample_rates*. resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field fusion (MRF) module. resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the multi-receptive field fusion (MRF) module. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.1): The angle of the negative slope used by the leaky ReLU activation. normalize_before (`bool`, *optional*, defaults to `True`): Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance. Example: ```python >>> from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig >>> # Initializing a "microsoft/speecht5_hifigan" style configuration >>> configuration = SpeechT5HifiGanConfig() >>> # Initializing a model (with random weights) from the "microsoft/speecht5_hifigan" style configuration >>> model = SpeechT5HifiGan(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "hifigan" def __init__( self, model_in_dim=80, sampling_rate=16000, upsample_initial_channel=512, upsample_rates=[4, 4, 4, 4], upsample_kernel_sizes=[8, 8, 8, 8], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], initializer_range=0.01, leaky_relu_slope=0.1, normalize_before=True, **kwargs, ): self.model_in_dim = model_in_dim self.sampling_rate = sampling_rate self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope self.normalize_before = normalize_before super().__init__(**kwargs) __all__ = ["SpeechT5Config", "SpeechT5HifiGanConfig"]
transformers/src/transformers/models/speecht5/configuration_speecht5.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/configuration_speecht5.py", "repo_id": "transformers", "token_count": 9174 }
527
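The masking and downsampling arithmetic described in the `SpeechT5Config` docstring above is easy to verify directly. Below is a minimal sketch using only the config attributes shown in the file; the 1000-frame sequence length is an illustrative value, not something defined by the file itself.

```python
from transformers import SpeechT5Config

config = SpeechT5Config()

# The speech encoder pre-net downsamples by the product of the conv strides:
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 waveform samples per output frame.
ratio = 1
for stride in config.conv_stride:
    ratio *= stride
print(ratio)  # 320

# Expected number of independent SpecAugment time masks for a hypothetical
# 1000-frame feature sequence, per the docstring formula, floored by
# `mask_time_min_masks`.
time_axis = 1000
num_masks = int(config.mask_time_prob * time_axis / config.mask_time_length)
print(max(num_masks, config.mask_time_min_masks))  # 5
```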
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import requests import torch from PIL import Image from transformers import SuperPointConfig, SuperPointForKeypointDetection, SuperPointImageProcessor def get_superpoint_config(): config = SuperPointConfig( encoder_hidden_sizes=[64, 64, 128, 128], decoder_hidden_size=256, keypoint_decoder_dim=65, descriptor_decoder_dim=256, keypoint_threshold=0.005, max_keypoints=-1, nms_radius=4, border_removal_distance=4, initializer_range=0.02, ) return config def create_rename_keys(config, state_dict): rename_keys = [] # Encoder weights rename_keys.append(("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight")) rename_keys.append(("conv1b.weight", "encoder.conv_blocks.0.conv_b.weight")) rename_keys.append(("conv2a.weight", "encoder.conv_blocks.1.conv_a.weight")) rename_keys.append(("conv2b.weight", "encoder.conv_blocks.1.conv_b.weight")) rename_keys.append(("conv3a.weight", "encoder.conv_blocks.2.conv_a.weight")) rename_keys.append(("conv3b.weight", "encoder.conv_blocks.2.conv_b.weight")) rename_keys.append(("conv4a.weight", "encoder.conv_blocks.3.conv_a.weight")) rename_keys.append(("conv4b.weight", "encoder.conv_blocks.3.conv_b.weight")) rename_keys.append(("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias")) rename_keys.append(("conv1b.bias", "encoder.conv_blocks.0.conv_b.bias")) rename_keys.append(("conv2a.bias", "encoder.conv_blocks.1.conv_a.bias")) rename_keys.append(("conv2b.bias", "encoder.conv_blocks.1.conv_b.bias")) rename_keys.append(("conv3a.bias", "encoder.conv_blocks.2.conv_a.bias")) rename_keys.append(("conv3b.bias", "encoder.conv_blocks.2.conv_b.bias")) rename_keys.append(("conv4a.bias", "encoder.conv_blocks.3.conv_a.bias")) rename_keys.append(("conv4b.bias", "encoder.conv_blocks.3.conv_b.bias")) # Keypoint Decoder weights rename_keys.append(("convPa.weight", "keypoint_decoder.conv_score_a.weight")) rename_keys.append(("convPb.weight", "keypoint_decoder.conv_score_b.weight")) rename_keys.append(("convPa.bias", "keypoint_decoder.conv_score_a.bias")) rename_keys.append(("convPb.bias", "keypoint_decoder.conv_score_b.bias")) # Descriptor Decoder weights rename_keys.append(("convDa.weight", "descriptor_decoder.conv_descriptor_a.weight")) rename_keys.append(("convDb.weight", "descriptor_decoder.conv_descriptor_b.weight")) rename_keys.append(("convDa.bias", "descriptor_decoder.conv_descriptor_a.bias")) rename_keys.append(("convDb.bias", "descriptor_decoder.conv_descriptor_b.bias")) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_imgs(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im1 = Image.open(requests.get(url, stream=True).raw) url = "http://images.cocodataset.org/test-stuff2017/000000004016.jpg" im2 = Image.open(requests.get(url, stream=True).raw) return [im1, im2] @torch.no_grad() def convert_superpoint_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub, test_mode=False): """ 
Copy/paste/tweak model's weights to our SuperPoint structure. """ print("Downloading original model from checkpoint...") config = get_superpoint_config() # load original state_dict from URL original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url) print("Converting model parameters...") # rename keys rename_keys = create_rename_keys(config, original_state_dict) new_state_dict = original_state_dict.copy() for src, dest in rename_keys: rename_key(new_state_dict, src, dest) # Load HuggingFace model model = SuperPointForKeypointDetection(config) model.load_state_dict(new_state_dict) model.eval() print("Successfully loaded weights in the model") # Check model outputs preprocessor = SuperPointImageProcessor() inputs = preprocessor(images=prepare_imgs(), return_tensors="pt") outputs = model(**inputs) # If test_mode is True, we check that the model outputs match the original results if test_mode: torch.count_nonzero(outputs.mask[0]) expected_keypoints_shape = (2, 830, 2) expected_scores_shape = (2, 830) expected_descriptors_shape = (2, 830, 256) expected_keypoints_values = torch.tensor([[480.0, 9.0], [494.0, 9.0], [489.0, 16.0]]) expected_scores_values = torch.tensor([0.0064, 0.0140, 0.0595, 0.0728, 0.5170, 0.0175, 0.1523, 0.2055, 0.0336]) expected_descriptors_value = torch.tensor(-0.1096) assert outputs.keypoints.shape == expected_keypoints_shape assert outputs.scores.shape == expected_scores_shape assert outputs.descriptors.shape == expected_descriptors_shape assert torch.allclose(outputs.keypoints[0, :3], expected_keypoints_values, atol=1e-3) assert torch.allclose(outputs.scores[0, :9], expected_scores_values, atol=1e-3) assert torch.allclose(outputs.descriptors[0, 0, 0], expected_descriptors_value, atol=1e-3) print("Model outputs match the original results!") if save_model: print("Saving model to local...") # Create folder to save model if not os.path.isdir(pytorch_dump_folder_path): os.mkdir(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) preprocessor.save_pretrained(pytorch_dump_folder_path) model_name = "magic-leap-community/superpoint" if push_to_hub: print(f"Pushing {model_name} to the hub...") model.push_to_hub(model_name) preprocessor.push_to_hub(model_name) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth", type=str, help="URL of the original SuperPoint checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub") args = parser.parse_args() convert_superpoint_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub )
transformers/src/transformers/models/superpoint/convert_superpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/superpoint/convert_superpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2864 }
528
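The conversion above boils down to popping keys out of the original state dict and re-inserting them under the HF names. A toy sketch of that mechanic, with a placeholder tensor standing in for real SuperPoint weights:

```python
import torch

# Toy state dict standing in for the original checkpoint; the shape is
# illustrative only.
state_dict = {"conv1a.weight": torch.zeros(64, 1, 3, 3)}
rename_keys = [("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight")]

# Same pop/re-insert move as rename_key() above.
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)

print(list(state_dict))  # ['encoder.conv_blocks.0.conv_a.weight']
```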
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Swin2SR Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Swin2SRConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Swin2SRModel`]. It is used to instantiate a Swin Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2 [caidas/swin2sr-classicalsr-x2-64](https://huggingface.co/caidas/swin2sr-classicalsr-x2-64) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 64): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 1): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_channels_out (`int`, *optional*, defaults to `num_channels`): The number of output channels. If not set, it will be set to `num_channels`. embed_dim (`int`, *optional*, defaults to 180): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 8): Size of windows. mlp_ratio (`float`, *optional*, defaults to 2.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. upscale (`int`, *optional*, defaults to 2): The upscale factor for the image. 
2/3/4/8 for image super resolution, 1 for denoising and compression artifact reduction. img_range (`float`, *optional*, defaults to 1.0): The range of the values of the input image. resi_connection (`str`, *optional*, defaults to `"1conv"`): The convolutional block to use before the residual connection in each stage. upsampler (`str`, *optional*, defaults to `"pixelshuffle"`): The reconstruction module. Can be 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None. Example: ```python >>> from transformers import Swin2SRConfig, Swin2SRModel >>> # Initializing a Swin2SR caidas/swin2sr-classicalsr-x2-64 style configuration >>> configuration = Swin2SRConfig() >>> # Initializing a model (with random weights) from the caidas/swin2sr-classicalsr-x2-64 style configuration >>> model = Swin2SRModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "swin2sr" attribute_map = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=64, patch_size=1, num_channels=3, num_channels_out=None, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_channels_out = num_channels if num_channels_out is None else num_channels_out self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.upscale = upscale self.img_range = img_range self.resi_connection = resi_connection self.upsampler = upsampler __all__ = ["Swin2SRConfig"]
transformers/src/transformers/models/swin2sr/configuration_swin2sr.py/0
{ "file_path": "transformers/src/transformers/models/swin2sr/configuration_swin2sr.py", "repo_id": "transformers", "token_count": 2701 }
529
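As a quick illustration of how these fields interact, the sketch below builds a smaller 4x variant and checks that `num_layers` is derived from `depths` (as in `__init__` above) and that the output resolution scales with `upscale`:

```python
from transformers import Swin2SRConfig

# A lighter 4x super-resolution variant; every other field keeps its default.
config = Swin2SRConfig(upscale=4, depths=[6, 6], num_heads=[6, 6])

# num_layers is set to len(depths) in __init__.
assert config.num_layers == len(config.depths) == 2

# A 64x64 input upscaled 4x yields a 256x256 output.
print(config.image_size * config.upscale)  # 256
```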
# coding=utf-8 # Copyright 2018 The T5 authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert T5 checkpoint.""" import argparse from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5 from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): # Initialise PyTorch model config = T5Config.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") model = T5ForConditionalGeneration(config) # Load weights from tf checkpoint load_tf_weights_in_t5(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
transformers/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 735 }
530
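The converter can also be driven programmatically instead of through argparse. A minimal sketch, assuming TensorFlow is installed (it is required by `load_tf_weights_in_t5`); the three paths are placeholders, not real files:

```python
from transformers.models.t5.convert_t5_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# Placeholder paths; point these at a real TF checkpoint, its config.json,
# and an output directory.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/t5/model.ckpt",
    config_file="/path/to/t5/config.json",
    pytorch_dump_path="/path/to/pytorch_dump",
)
```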
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Table Transformer checkpoints with native (Transformers) backbone. URL: https://github.com/microsoft/table-transformer """ import argparse from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, ResNetConfig, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def create_rename_keys(config): # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias")) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_var", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv2.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.bias", 
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_var", ) ) # all ResNet stages except the first one have a downsample as first layer if stage_idx != 0 and layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( # "backbone.conv_encoder.model.encoder.stages.3.layers.0.shortcut.normalization.running_var" f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", 
f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def read_in_q_k_v(state_dict, is_panoptic=False): prefix = "" if is_panoptic: prefix = "detr." 
# first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention in_proj_weight_cross_attn = state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) of cross-attention to the state dict state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :] state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] def resize(image, checkpoint_url): width, height = image.size current_max_size = max(width, height) target_max_size = 800 if "detection" in checkpoint_url else 1000 scale = target_max_size / current_max_size resized_image = image.resize((int(round(scale * width)), int(round(scale * height)))) return resized_image def normalize(image): image = F.to_tensor(image) image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) return image @torch.no_grad() def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our DETR structure. 
""" logger.info("Converting model...") # create HuggingFace model and load state dict backbone_config = ResNetConfig.from_pretrained( "microsoft/resnet-18", out_features=["stage1", "stage2", "stage3", "stage4"] ) config = TableTransformerConfig( backbone_config=backbone_config, use_timm_backbone=False, mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, ) # load original state dict state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") # rename keys for src, dest in create_rename_keys(config): rename_key(state_dict, src, dest) # query, key and value matrices need special treatment read_in_q_k_v(state_dict) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them prefix = "model." for key in state_dict.copy(): if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): val = state_dict.pop(key) state_dict[prefix + key] = val if "detection" in checkpoint_url: config.num_queries = 15 config.num_labels = 2 id2label = {0: "table", 1: "table rotated"} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} else: config.num_queries = 125 config.num_labels = 6 id2label = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} image_processor = DetrImageProcessor(format="coco_detection", size={"longest_edge": 800}) model = TableTransformerForObjectDetection(config) model.load_state_dict(state_dict) model.eval() # verify our conversion filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename) image = Image.open(file_path).convert("RGB") pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0) outputs = model(pixel_values) if "detection" in checkpoint_url: expected_shape = (1, 15, 3) expected_logits = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]]) else: expected_shape = (1, 125, 7) expected_logits = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]]) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub...") model_name = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(model_name, revision="no_timm") image_processor.push_to_hub(model_name, 
revision="no_timm") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py/0
{ "file_path": "transformers/src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py", "repo_id": "transformers", "token_count": 10228 }
531
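The trickiest step above is `read_in_q_k_v`: PyTorch's `nn.MultiheadAttention` stores the query/key/value projections as one fused `(3 * d_model, d_model)` matrix, which the HF model keeps as three separate linear layers. A toy sketch of that split with random placeholder weights:

```python
import torch

d_model = 256  # hidden size used throughout the DETR-style model above
in_proj_weight = torch.randn(3 * d_model, d_model)  # fused q/k/v matrix

# Same row slicing as read_in_q_k_v(): rows [0, 256) are q, [256, 512) are k,
# and the last 256 rows are v.
q_w = in_proj_weight[:d_model, :]
k_w = in_proj_weight[d_model : 2 * d_model, :]
v_w = in_proj_weight[-d_model:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d_model, d_model)
```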
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Time Series Transformer model.""" from typing import Callable, Optional, Union import numpy as np import torch from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from .configuration_time_series_transformer import TimeSeriesTransformerConfig if is_torch_flex_attn_available(): from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) class TimeSeriesFeatureEmbedder(nn.Module): """ Embed a sequence of categorical features. Args: cardinalities (`list[int]`): List of cardinalities of the categorical features. embedding_dims (`list[int]`): List of embedding dimensions of the categorical features. """ def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: # we slice the last dimension, giving an array of length # self.num_features with shape (N,T) or (N) cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat( [ embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices) ], dim=-1, ) class TimeSeriesStdScaler(nn.Module): """ Standardizes features by computing the mean and standard deviation along the first dimension, then normalizes the features by subtracting the mean and dividing by the standard deviation.
""" def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale class TimeSeriesMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale class TimeSeriesNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
""" def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor] = None ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: """ Computes the negative log likelihood loss from input distribution with respect to target. """ return -input.log_prob(target) def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: """ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. Args: input_tensor (`torch.FloatTensor`): Input tensor, of which the average must be computed. weights (`torch.FloatTensor`, *optional*): Weights tensor, of the same shape as `input_tensor`. dim (`int`, *optional*): The dim along which to average `input_tensor`. Returns: `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. """ if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) # Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->TimeSeries class TimeSeriesSinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None: super().__init__(num_positions, embedding_dim) def _init_weight(self): """ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. 
[dim // 2:] """ n_pos, dim = self.weight.shape position_enc = np.array( [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] ) out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False) sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) self.weight = nn.Parameter(out, requires_grad=False) @torch.no_grad() def forward( self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None ) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" if position_ids is None: bsz, seq_len = input_ids_shape[:2] position_ids = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(position_ids) class TimeSeriesValueEmbedding(nn.Module): def __init__(self, feature_size, d_model): super().__init__() self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False) def forward(self, x): return self.value_projection(x) # Copied from transformers.models.bart.modeling_bart.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): if scaling is None: scaling = query.size(-1) ** -0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->TimeSeriesTransformer class TimeSeriesTransformerAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[TimeSeriesTransformerConfig] = None, layer_idx: Optional[int] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(*kv_input_shape).transpose(1, 2) value_states = value_states.view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights # Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with 
Bart->TimeSeriesTransformer, BART->TIME_SERIES_TRANSFORMER class TimeSeriesTransformerEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int] = None): super().__init__() self.embed_dim = config.d_model self.self_attn = TimeSeriesTransformerAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, layer_idx=layer_idx, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer, with BART->TIME_SERIES_TRANSFORMER class TimeSeriesTransformerDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int] = None): super().__init__() self.embed_dim = config.d_model self.self_attn = TimeSeriesTransformerAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = 
TimeSeriesTransformerAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        cache_position: Optional[torch.Tensor] = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
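
        Example:

        A minimal, illustrative sketch with a randomly initialized layer (this layer class is internal to
        the module; the config values below are demonstration assumptions, not a released checkpoint):

        ```python
        >>> import torch
        >>> from transformers import TimeSeriesTransformerConfig

        >>> config = TimeSeriesTransformerConfig(prediction_length=4, d_model=16)
        >>> layer = TimeSeriesTransformerDecoderLayer(config, layer_idx=0)
        >>> hidden_states = torch.randn(1, 4, config.d_model)
        >>> # without `encoder_hidden_states`, only the self-attention block runs
        >>> layer(hidden_states)[0].shape
        torch.Size([1, 4, 16])
        ```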
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs @auto_docstring class TimeSeriesTransformerPreTrainedModel(PreTrainedModel): config: TimeSeriesTransformerConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = True # TODO: tests would need a rewrite to check for correct implementation # Current tests always assume certain inputs to be passed _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, TimeSeriesSinusoidalPositionalEmbedding): module._init_weight() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_full_mask def _update_full_mask( self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor, ): if attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & head_mask can not be supported when using SDPA, fall back to # the manual implementation that requires a 4D causal mask in all cases. 
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoder._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int, ): if self.config._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, inputs_embeds, past_key_values_length, ) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) # Other attention flavors support in-built causal (when `mask is None`) # while we need to create our specific block mask regardless elif attention_mask is None: attention_mask = make_flex_block_causal_mask( torch.ones( size=(input_shape), device=inputs_embeds.device, ) ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) return attention_mask # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoder._update_cross_attn_mask def _update_cross_attn_mask( self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, ): # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], ) elif self.config._attn_implementation == "flex_attention": if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask( encoder_attention_mask, query_length=input_shape[-1], is_causal=False, ) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) return encoder_attention_mask class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. 
Each layer is a [`TimeSeriesTransformerEncoderLayer`]. Args: config: TimeSeriesTransformerConfig """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList([TimeSeriesTransformerEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
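
        Example:

        A minimal, illustrative sketch that feeds randomly drawn feature embeddings straight to a freshly
        initialized encoder (this encoder class is internal to the module; the config values are
        demonstration assumptions only):

        ```python
        >>> import torch
        >>> from transformers import TimeSeriesTransformerConfig

        >>> config = TimeSeriesTransformerConfig(prediction_length=4, context_length=8)
        >>> encoder = TimeSeriesTransformerEncoder(config)
        >>> inputs_embeds = torch.randn(2, config.context_length, config.feature_size)
        >>> outputs = encoder(inputs_embeds=inputs_embeds)
        >>> last_hidden_state = outputs.last_hidden_state  # shape: (2, config.context_length, config.d_model)
        ```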
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size()) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = self._update_full_mask( attention_mask, inputs_embeds, ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TimeSeriesTransformerDecoderLayer`] Args: config: TimeSeriesTransformerConfig """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList( [TimeSeriesTransformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)] ) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] # initialize `past_key_values` if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device ) attention_mask = self._update_causal_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length, ) encoder_attention_mask = self._update_cross_attn_mask( encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds, ) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != (len(self.layers)):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask,
                encoder_hidden_states,  # as a positional argument for gradient checkpointing
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@auto_docstring
class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel):
    def __init__(self, config: TimeSeriesTransformerConfig):
        super().__init__(config)

        if config.scaling == "mean" or config.scaling is True:
            self.scaler = TimeSeriesMeanScaler(config)
        elif config.scaling == "std":
            self.scaler = TimeSeriesStdScaler(config)
        else:
            self.scaler = TimeSeriesNOPScaler(config)

        if config.num_static_categorical_features > 0:
            self.embedder = TimeSeriesFeatureEmbedder(
                cardinalities=config.cardinality,
                embedding_dims=config.embedding_dimension,
            )

        # transformer encoder-decoder and mask initializer
        self.encoder = TimeSeriesTransformerEncoder(config)
        self.decoder = TimeSeriesTransformerDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    @property
    def _past_length(self) -> int:
        return self.config.context_length + max(self.config.lags_sequence)

    def get_lagged_subsequences(
        self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
    ) -> torch.Tensor:
        """
        Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I), where S =
        subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i, j, :, k] =
        sequence[i, -indices[k]-S+j, :].

        Args:
            sequence: Tensor
                The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
            subsequences_length : int
                Length of the subsequences to be extracted.
            shift: int
                Shift the lags by this amount back.
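
        Example:

        A standalone sketch of the same indexing (outside the model), assuming `lags_sequence = [1, 2]`,
        `subsequences_length = 3`, and `shift = 0`:

        ```python
        >>> import torch

        >>> sequence = torch.arange(10.0).reshape(1, 10, 1)  # (N, T, C)
        >>> lagged = torch.stack([sequence[:, -lag - 3 : -lag, ...] for lag in [1, 2]], dim=-1)
        >>> lagged.shape  # (N, S, C, I)
        torch.Size([1, 3, 1, 2])
        ```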
""" sequence_length = sequence.shape[1] indices = [lag - shift for lag in self.config.lags_sequence] if max(indices) + subsequences_length > sequence_length: raise ValueError( f"lags cannot go further than history length, found lag {max(indices)} " f"while history length is only {sequence_length}" ) lagged_values = [] for lag_index in indices: begin_index = -lag_index - subsequences_length end_index = -lag_index if lag_index > 0 else None lagged_values.append(sequence[:, begin_index:end_index, ...]) return torch.stack(lagged_values, dim=-1) def create_network_inputs( self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, past_observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, ): # time feature time_feat = ( torch.cat( ( past_time_features[:, self._past_length - self.config.context_length :, ...], future_time_features, ), dim=1, ) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length :, ...] ) # target if past_observed_mask is None: past_observed_mask = torch.ones_like(past_values) context = past_values[:, -self.config.context_length :] observed_context = past_observed_mask[:, -self.config.context_length :] _, loc, scale = self.scaler(context, observed_context) inputs = ( (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale ) # static features if loc.ndim == 3: squeezed_loc = loc.squeeze(1) squeezed_scale = scale.squeeze(1) else: squeezed_loc = loc squeezed_scale = scale log_abs_loc = squeezed_loc.abs().log1p() log_scale = squeezed_scale.log() static_feat = torch.cat((log_abs_loc, log_scale), dim=1) if static_real_features is not None: static_feat = torch.cat((static_real_features, static_feat), dim=1) if static_categorical_features is not None: embedded_cat = self.embedder(static_categorical_features) static_feat = torch.cat((embedded_cat, static_feat), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1) # all features features = torch.cat((expanded_static_feat, time_feat), dim=-1) # lagged features subsequences_length = ( self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length ) lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]: raise ValueError( f"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match" ) # transformer inputs transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1) return transformer_inputs, loc, scale, static_feat def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @auto_docstring def forward( self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, 
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[list[torch.FloatTensor]] = None,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        use_cache: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Seq2SeqTSModelOutput, tuple]:
        r"""
        past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
            Past values of the time series, that serve as context in order to predict the future. The sequence size of
            this tensor must be larger than the `context_length` of the model, since the model will use the larger
            size to construct lag features, i.e. additional values from the past which are added in order to serve as
            "extra context".

            The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
            `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
            look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
            the past.

            The `past_values` is what the Transformer encoder gets as input (with optional additional features, such
            as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).

            Missing values need to be replaced with zeros and indicated via the `past_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
            Required time features, which the model internally will add to `past_values`. These could be things like
            "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
            could also be so-called "age" features, which basically help the model know "at which point in life" a
            time-series is. Age features have small values for distant past time steps and increase monotonically the
            more we approach the current time step. Holiday features are also a good example of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires additional time features to be provided. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
            in `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
            Optional static categorical features for which the model will learn an embedding, which it will add to the
            values of the time series.

            Static categorical features are features which have the same value for all time steps (static over time).

            A typical example of a static categorical feature is a time series ID.
        static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
            Optional static real features which the model will add to the values of the time series.

            Static real features are features which have the same value for all time steps (static over time).

            A typical example of a static real feature is promotion information.
        future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
            Future values of the time series, that serve as labels for the model. The `future_values` is what the
            Transformer needs during training to learn to output, given the `past_values`.

            The sequence length here is equal to `prediction_length`.

            See the demo notebook and code snippets for details.

            During training, any missing values need to be replaced with zeros and indicated via the
            `future_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
            Required time features for the prediction window, which the model internally will add to `future_values`.
            These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
            Fourier features). These could also be so-called "age" features, which basically help the model know "at
            which point in life" a time-series is. Age features have small values for distant past time steps and
            increase monotonically the more we approach the current time step. Holiday features are also a good
            example of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires additional time features to be provided. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.

        Examples:

        ```python
        >>> from huggingface_hub import hf_hub_download
        >>> import torch
        >>> from transformers import TimeSeriesTransformerModel

        >>> file = hf_hub_download(
        ...     repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
        ... 
)
        >>> batch = torch.load(file)

        >>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly")

        >>> # during training, one provides both past and future values
        >>> # as well as possible additional features
        >>> outputs = model(
        ...     past_values=batch["past_values"],
        ...     past_time_features=batch["past_time_features"],
        ...     past_observed_mask=batch["past_observed_mask"],
        ...     static_categorical_features=batch["static_categorical_features"],
        ...     static_real_features=batch["static_real_features"],
        ...     future_values=batch["future_values"],
        ...     future_time_features=batch["future_time_features"],
        ... )

        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_inputs, loc, scale, static_feat = self.create_network_inputs(
            past_values=past_values,
            past_time_features=past_time_features,
            past_observed_mask=past_observed_mask,
            static_categorical_features=static_categorical_features,
            static_real_features=static_real_features,
            future_values=future_values,
            future_time_features=future_time_features,
        )

        if encoder_outputs is None:
            enc_input = transformer_inputs[:, : self.config.context_length, ...]
            encoder_outputs = self.encoder(
                inputs_embeds=enc_input,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # Avoid empty tensors and instead create a zeros tensor which
        # will be treated the same in torch, i.e. matmul with empty == all 0s
        if self.config.context_length >= transformer_inputs.shape[1]:
            bsz, _, dim = transformer_inputs.shape
            dec_input = torch.zeros(
                size=(bsz, 1, dim), device=transformer_inputs.device, dtype=transformer_inputs.dtype
            )
        else:
            dec_input = transformer_inputs[:, self.config.context_length :, ...]
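        # `dec_input` now holds the prediction-window slice of the lagged features (or a single
        # all-zeros step when no future values were provided), while the encoder consumed the
        # `context_length`-sized prefix above.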
decoder_outputs = self.decoder( inputs_embeds=dec_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs + (loc, scale, static_feat) return Seq2SeqTSModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat, ) @auto_docstring class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.model = TimeSeriesTransformerModel(config) if config.distribution_output == "student_t": self.distribution_output = StudentTOutput(dim=config.input_size) elif config.distribution_output == "normal": self.distribution_output = NormalOutput(dim=config.input_size) elif config.distribution_output == "negative_binomial": self.distribution_output = NegativeBinomialOutput(dim=config.input_size) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model) self.target_shape = self.distribution_output.event_shape if config.loss == "nll": self.loss = nll else: raise ValueError(f"Unknown loss function {config.loss}") # Initialize weights of distribution_output and apply final processing self.post_init() def output_params(self, dec_output): return self.parameter_projection(dec_output) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @torch.jit.ignore def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution: sliced_params = params if trailing_n is not None: sliced_params = [p[:, -trailing_n:] for p in params] return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale) @auto_docstring def forward( self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, future_observed_mask: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, use_cache: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Seq2SeqTSModelOutput, tuple]: r""" past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` 
or `(batch_size, sequence_length, input_size)`):
            Past values of the time series, that serve as context in order to predict the future. The sequence size of
            this tensor must be larger than the `context_length` of the model, since the model will use the larger
            size to construct lag features, i.e. additional values from the past which are added in order to serve as
            "extra context".

            The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
            `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
            look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
            the past.

            The `past_values` is what the Transformer encoder gets as input (with optional additional features, such
            as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).

            Missing values need to be replaced with zeros and indicated via the `past_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
            Required time features, which the model internally will add to `past_values`. These could be things like
            "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
            could also be so-called "age" features, which basically help the model know "at which point in life" a
            time-series is. Age features have small values for distant past time steps and increase monotonically the
            more we approach the current time step. Holiday features are also a good example of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires additional time features to be provided. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
            in `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
            Optional static categorical features for which the model will learn an embedding, which it will add to the
            values of the time series.

            Static categorical features are features which have the same value for all time steps (static over time).

            A typical example of a static categorical feature is a time series ID.
        static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
            Optional static real features which the model will add to the values of the time series.

            Static real features are features which have the same value for all time steps (static over time).

            A typical example of a static real feature is promotion information.
        future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
            Future values of the time series, that serve as labels for the model. The `future_values` is what the
            Transformer needs during training to learn to output, given the `past_values`.

            The sequence length here is equal to `prediction_length`.

            See the demo notebook and code snippets for details.

            During training, any missing values need to be replaced with zeros and indicated via the
            `future_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
            Required time features for the prediction window, which the model internally will add to `future_values`.
            These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
            Fourier features). These could also be so-called "age" features, which basically help the model know "at
            which point in life" a time-series is. Age features have small values for distant past time steps and
            increase monotonically the more we approach the current time step. Holiday features are also a good
            example of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires additional time features to be provided. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
            Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
            in `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).

            This mask is used to filter out missing values for the final loss calculation.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.

        Examples:

        ```python
        >>> from huggingface_hub import hf_hub_download
        >>> import torch
        >>> from transformers import TimeSeriesTransformerForPrediction

        >>> file = hf_hub_download(
        ...     repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
        ... )
        >>> batch = torch.load(file)

        >>> model = TimeSeriesTransformerForPrediction.from_pretrained(
        ...     "huggingface/time-series-transformer-tourism-monthly"
        ... 
) >>> # during training, one provides both past and future values >>> # as well as possible additional features >>> outputs = model( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) >>> loss = outputs.loss >>> loss.backward() >>> # during inference, one only provides past values >>> # as well as possible additional features >>> # the model autoregressively generates future values >>> outputs = model.generate( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_time_features=batch["future_time_features"], ... ) >>> mean_prediction = outputs.sequences.mean(dim=1) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if future_values is not None: use_cache = False outputs = self.model( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, cache_position=cache_position, ) prediction_loss = None params = None if future_values is not None: params = self.output_params(outputs[0]) # outputs.last_hidden_state # loc is 3rd last and scale is 2nd last output distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2]) loss = self.loss(distribution, future_values) if future_observed_mask is None: future_observed_mask = torch.ones_like(future_values) if len(self.target_shape) == 0: loss_weights = future_observed_mask else: loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False) prediction_loss = weighted_average(loss, weights=loss_weights) if not return_dict: outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:] return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs return Seq2SeqTSPredictionOutput( loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features, ) @torch.no_grad() def generate( self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, 
output_hidden_states: Optional[bool] = None, ) -> SampleTSPredictionOutput: r""" Greedily generate sequences of sample predictions from a model with a probability distribution head. Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`): Past values of the time series, that serve as context in order to predict the future. The sequence size of this tensor must be larger than the `context_length` of the model, since the model will use the larger size to construct lag features, i.e. additional values from the past which are added in order to serve as "extra context". The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of the past. The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags). Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`): Required time features, which the model internally will add to `past_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must but known at prediction time. The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`. future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`): Required time features for the prediction window, which the model internally will add to sampled predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires to provide additional time features. 
                The Time Series Transformer only learns additional embeddings for `static_categorical_features`.

                Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
                features must be known at prediction time.

                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
            past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
                Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
                in `[0, 1]`:

                - 1 for values that are **observed**,
                - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
            static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
                Optional static categorical features for which the model will learn an embedding, which it will add to
                the values of the time series.

                Static categorical features are features which have the same value for all time steps (static over
                time).

                A typical example of a static categorical feature is a time series ID.
            static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
                Optional static real features which the model will add to the values of the time series.

                Static real features are features which have the same value for all time steps (static over time).

                A typical example of a static real feature is promotion information.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers.

        Return:
            [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
            samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
            multivariate predictions.
""" outputs = self( static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=future_time_features, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=True, ) decoder = self.model.get_decoder() enc_last_hidden = outputs.encoder_last_hidden_state loc = outputs.loc scale = outputs.scale static_feat = outputs.static_features num_parallel_samples = self.config.num_parallel_samples repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_past_values = ( past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc ) / repeated_scale expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1) features = torch.cat((expanded_static_feat, future_time_features), dim=-1) repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) future_samples = [] # greedy decoding for k in range(self.config.prediction_length): lagged_sequence = self.model.get_lagged_subsequences( sequence=repeated_past_values, subsequences_length=1 + k, shift=1, ) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1) dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden) dec_last_hidden = dec_output.last_hidden_state params = self.parameter_projection(dec_last_hidden[:, -1:]) distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale) next_sample = distr.sample() repeated_past_values = torch.cat( (repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1 ) future_samples.append(next_sample) concat_future_samples = torch.cat(future_samples, dim=1) return SampleTSPredictionOutput( sequences=concat_future_samples.reshape( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) ) __all__ = ["TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel"]
transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py/0
{ "file_path": "transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py", "repo_id": "transformers", "token_count": 39486 }
532
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Optional, Union

import torch
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...modeling_outputs import ImageClassifierOutput, ModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, is_timm_available, requires_backends
from .configuration_timm_wrapper import TimmWrapperConfig


if is_timm_available():
    import timm


@dataclass
@auto_docstring(
    custom_intro="""
    Output class for TimmWrapperModel, containing the last hidden states, an optional pooled output,
    and optional hidden states.
    """
)
class TimmWrapperModelOutput(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor`):
        The last hidden state of the model, output before applying the classification head.
    pooler_output (`torch.FloatTensor`, *optional*):
        The pooled output derived from the last hidden state, if applicable.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned if `output_hidden_states=True` is set or if `config.output_hidden_states=True`):
        A tuple containing the intermediate hidden states of the model at the output of each layer or specified layers.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned if `output_attentions=True` is set or if `config.output_attentions=True`):
        A tuple containing the intermediate attention weights of the model at the output of each layer.
        Note: Currently, Timm models do not support attentions output.
    """

    last_hidden_state: torch.FloatTensor
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@auto_docstring
class TimmWrapperPreTrainedModel(PreTrainedModel):
    main_input_name = "pixel_values"
    config: TimmWrapperConfig
    _no_split_modules = []
    model_tags = ["timm"]

    # used in Trainer to avoid passing `loss_kwargs` to model forward
    accepts_loss_kwargs = False

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision", "timm"])
        super().__init__(*args, **kwargs)

    def post_init(self):
        self.supports_gradient_checkpointing = self._timm_model_supports_gradient_checkpointing()
        super().post_init()

    @staticmethod
    def _fix_state_dict_key_on_load(key) -> tuple[str, bool]:
        """
        Overrides original method that renames `gamma` and `beta` to `weight` and `bias`. We don't want this behavior
        for timm wrapped models. Instead, this method adds a "timm_model." prefix to enable loading official timm Hub
        checkpoints.
        """
        if "timm_model." not in key:
            return f"timm_model.{key}", True
        return key, False

    def _fix_state_dict_key_on_save(self, key):
        """
        Overrides original method to remove "timm_model." prefix from state_dict keys. Makes the saved checkpoint
        compatible with the `timm` library.
""" return key.replace("timm_model.", ""), True def load_state_dict(self, state_dict, *args, **kwargs): """ Override original method to fix state_dict keys on load for cases when weights are loaded without using the `from_pretrained` method (e.g., in Trainer to resume from checkpoint). """ state_dict = {self._fix_state_dict_key_on_load(k)[0]: v for k, v in state_dict.items()} return super().load_state_dict(state_dict, *args, **kwargs) def _init_weights(self, module): """ Initialize weights function to properly initialize Linear layer weights. Since model architectures may vary, we assume only the classifier requires initialization, while all other weights should be loaded from the checkpoint. """ if isinstance(module, (nn.Linear)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def _timm_model_supports_gradient_checkpointing(self): """ Check if the timm model supports gradient checkpointing by checking if the `set_grad_checkpointing` method is available. Some timm models will have the method but will raise an AssertionError when called so in this case we return False. """ if not hasattr(self.timm_model, "set_grad_checkpointing"): return False try: self.timm_model.set_grad_checkpointing(enable=True) self.timm_model.set_grad_checkpointing(enable=False) return True except Exception: return False def _set_gradient_checkpointing(self, enable: bool = True, *args, **kwargs): self.timm_model.set_grad_checkpointing(enable) class TimmWrapperModel(TimmWrapperPreTrainedModel): """ Wrapper class for timm models to be used in transformers. """ def __init__(self, config: TimmWrapperConfig): super().__init__(config) # using num_classes=0 to avoid creating classification head extra_init_kwargs = config.model_args or {} self.timm_model = timm.create_model(config.architecture, pretrained=False, num_classes=0, **extra_init_kwargs) self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[Union[bool, list[int]]] = None, return_dict: Optional[bool] = None, do_pooling: Optional[bool] = None, **kwargs, ) -> Union[TimmWrapperModelOutput, tuple[Tensor, ...]]: r""" output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. Not compatible with timm wrapped models. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. Not compatible with timm wrapped models. do_pooling (`bool`, *optional*): Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not. If `None` is passed, the `do_pooling` value from the config is used. Examples: ```python >>> import torch >>> from PIL import Image >>> from urllib.request import urlopen >>> from transformers import AutoModel, AutoImageProcessor >>> # Load image >>> image = Image.open(urlopen( ... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' ... )) >>> # Load model and image processor >>> checkpoint = "timm/resnet50.a1_in1k" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) >>> model = AutoModel.from_pretrained(checkpoint).eval() >>> # Preprocess image >>> inputs = image_processor(image) >>> # Forward pass >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # Get pooled output >>> pooled_output = outputs.pooler_output >>> # Get last hidden state >>> last_hidden_state = outputs.last_hidden_state ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions do_pooling = do_pooling if do_pooling is not None else self.config.do_pooling if output_attentions: raise ValueError("Cannot set `output_attentions` for timm models.") if output_hidden_states and not hasattr(self.timm_model, "forward_intermediates"): raise ValueError( "The 'output_hidden_states' option cannot be set for this timm model. " "To enable this feature, the 'forward_intermediates' method must be implemented " "in the timm model (available in timm versions > 1.*). Please consider using a " "different architecture or updating the timm package to a compatible version." ) pixel_values = pixel_values.to(self.device, self.dtype) if output_hidden_states: # to enable hidden states selection if isinstance(output_hidden_states, (list, tuple)): kwargs["indices"] = output_hidden_states last_hidden_state, hidden_states = self.timm_model.forward_intermediates(pixel_values, **kwargs) else: last_hidden_state = self.timm_model.forward_features(pixel_values, **kwargs) hidden_states = None if do_pooling: # classification head is not created, applying pooling only pooler_output = self.timm_model.forward_head(last_hidden_state) else: pooler_output = None if not return_dict: outputs = (last_hidden_state, pooler_output, hidden_states) outputs = tuple(output for output in outputs if output is not None) return outputs return TimmWrapperModelOutput( last_hidden_state=last_hidden_state, pooler_output=pooler_output, hidden_states=hidden_states, ) class TimmWrapperForImageClassification(TimmWrapperPreTrainedModel): """ Wrapper class for timm models to be used in transformers for image classification. """ def __init__(self, config: TimmWrapperConfig): super().__init__(config) if config.num_labels == 0: raise ValueError( "You are trying to load weights into `TimmWrapperForImageClassification` from a checkpoint with no classifier head. " "Please specify the number of classes, e.g. `model = TimmWrapperForImageClassification.from_pretrained(..., num_labels=10)`, " "or use `TimmWrapperModel` for feature extraction." ) extra_init_kwargs = config.model_args or {} self.timm_model = timm.create_model( config.architecture, pretrained=False, num_classes=config.num_labels, **extra_init_kwargs ) self.num_labels = config.num_labels self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[Union[bool, list[int]]] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[ImageClassifierOutput, tuple[Tensor, ...]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
Not compatible with timm wrapped models. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. Not compatible with timm wrapped models. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. **kwargs: Additional keyword arguments passed along to the `timm` model forward. Examples: ```python >>> import torch >>> from PIL import Image >>> from urllib.request import urlopen >>> from transformers import AutoModelForImageClassification, AutoImageProcessor >>> # Load image >>> image = Image.open(urlopen( ... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' ... )) >>> # Load model and image processor >>> checkpoint = "timm/resnet50.a1_in1k" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) >>> model = AutoModelForImageClassification.from_pretrained(checkpoint).eval() >>> # Preprocess image >>> inputs = image_processor(image) >>> # Forward pass >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # Get top 5 predictions >>> top5_probabilities, top5_class_indices = torch.topk(logits.softmax(dim=1) * 100, k=5) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("Cannot set `output_attentions` for timm models.") if output_hidden_states and not hasattr(self.timm_model, "forward_intermediates"): raise ValueError( "The 'output_hidden_states' option cannot be set for this timm model. " "To enable this feature, the 'forward_intermediates' method must be implemented " "in the timm model (available in timm versions > 1.*). Please consider using a " "different architecture or updating the timm package to a compatible version." 
) pixel_values = pixel_values.to(self.device, self.dtype) if output_hidden_states: # to enable hidden states selection if isinstance(output_hidden_states, (list, tuple)): kwargs["indices"] = output_hidden_states last_hidden_state, hidden_states = self.timm_model.forward_intermediates(pixel_values, **kwargs) logits = self.timm_model.forward_head(last_hidden_state) else: logits = self.timm_model(pixel_values, **kwargs) hidden_states = None loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: outputs = (loss, logits, hidden_states) outputs = tuple(output for output in outputs if output is not None) return outputs return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=hidden_states, ) __all__ = ["TimmWrapperPreTrainedModel", "TimmWrapperModel", "TimmWrapperForImageClassification"]
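One wrapper-specific behavior worth a concrete illustration is the list form of `output_hidden_states`, which the forward passes above route to timm's `forward_intermediates` through the `indices` kwarg. A minimal sketch, assuming the same checkpoint as in the docstring examples and random pixel values standing in for a preprocessed image:

```python
import torch
from transformers import AutoModel

# Same checkpoint as in the docstring examples above.
model = AutoModel.from_pretrained("timm/resnet50.a1_in1k").eval()

# Random stand-in for image_processor output; (batch, channels, height, width) is assumed.
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    # A list selects which intermediate feature maps to return; it is forwarded
    # to timm's forward_intermediates via the `indices` kwarg.
    outputs = model(pixel_values, output_hidden_states=[1, 2, 3])

print(len(outputs.hidden_states))  # 3 selected feature maps
print(outputs.last_hidden_state.shape)
```

Passing `output_hidden_states=True` instead returns all intermediate feature maps, while models without `forward_intermediates` raise the `ValueError` shown in the forward methods above.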
transformers/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py/0
{ "file_path": "transformers/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py", "repo_id": "transformers", "token_count": 6605 }
533
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for UDOP. """ from typing import Optional, Union from transformers import logging from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput logger = logging.get_logger(__name__) class UdopTextKwargs(TextKwargs, total=False): word_labels: Optional[Union[list[int], list[list[int]]]] boxes: Union[list[list[int]], list[list[list[int]]]] class UdopProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: UdopTextKwargs _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "truncation": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_length": False, "verbose": True, }, "images_kwargs": {}, } class UdopProcessor(ProcessorMixin): r""" Constructs a UDOP processor which combines a LayoutLMv3 image processor and a UDOP tokenizer into a single processor. [`UdopProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`LayoutLMv3ImageProcessor`] to resize, rescale and normalize document images, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to [`UdopTokenizer`] or [`UdopTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token classification tasks (such as FUNSD, CORD). Additionally, it also supports passing `text_target` and `text_pair_target` to the tokenizer, which can be used to prepare labels for language modeling tasks. Args: image_processor (`LayoutLMv3ImageProcessor`): An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input. tokenizer (`UdopTokenizer` or `UdopTokenizerFast`): An instance of [`UdopTokenizer`] or [`UdopTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "LayoutLMv3ImageProcessor" tokenizer_class = ("UdopTokenizer", "UdopTokenizerFast") def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[UdopProcessorKwargs], ) -> BatchFeature: """ This method first forwards the `images` argument to [`~UdopImageProcessor.__call__`]. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and bounding boxes along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. 
        In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `False`, it passes the words
        (`text`/`text_pair`) and `boxes` specified by the user along with the additional arguments to
        [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`.

        Alternatively, one can pass `text_target` and `text_pair_target` to prepare the targets of UDOP.

        Please refer to the docstring of the above two methods for more information.
        """
        # verify input
        output_kwargs = self._merge_kwargs(
            UdopProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        boxes = output_kwargs["text_kwargs"].pop("boxes", None)
        word_labels = output_kwargs["text_kwargs"].pop("word_labels", None)
        text_pair = output_kwargs["text_kwargs"].pop("text_pair", None)
        return_overflowing_tokens = output_kwargs["text_kwargs"].get("return_overflowing_tokens", False)
        return_offsets_mapping = output_kwargs["text_kwargs"].get("return_offsets_mapping", False)
        text_target = output_kwargs["text_kwargs"].get("text_target", None)

        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens and not return_offsets_mapping:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        if text_target is not None:
            # use the tokenizer to prepare the targets of UDOP
            return self.tokenizer(
                **output_kwargs["text_kwargs"],
            )

        else:
            # use the processor to prepare the inputs of UDOP
            # first, apply the image processor
            features = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            features_words = features.pop("words", None)
            features_boxes = features.pop("boxes", None)

            output_kwargs["text_kwargs"].pop("text_target", None)
            output_kwargs["text_kwargs"].pop("text_pair_target", None)
            output_kwargs["text_kwargs"]["text_pair"] = text_pair
            output_kwargs["text_kwargs"]["boxes"] = boxes if boxes is not None else features_boxes
            output_kwargs["text_kwargs"]["word_labels"] = word_labels

            # second, apply the tokenizer
            if text is not None and self.image_processor.apply_ocr and text_pair is None:
                if isinstance(text, str):
                    text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
                output_kwargs["text_kwargs"]["text_pair"] = features_words

            encoded_inputs = self.tokenizer(
                text=text if text is not None else features_words,
                **output_kwargs["text_kwargs"],
            )

            # add pixel values
            if return_overflowing_tokens:
                features["pixel_values"] = self.get_overflowing_images(
                    features["pixel_values"], encoded_inputs["overflow_to_sample_mapping"]
                )
            features.update(encoded_inputs)

            return features

    # Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.get_overflowing_images
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(tokenizer_input_names + image_processor_input_names + ["bbox"])


__all__ = ["UdopProcessor"]
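A short usage sketch of the processing flow described above. The checkpoint name and image path are illustrative assumptions; with the image processor's default `apply_ocr=True`, words and boxes come from OCR and a text prompt becomes the first tokenized sequence:

```python
from PIL import Image
from transformers import UdopProcessor

# Checkpoint name assumed for illustration.
processor = UdopProcessor.from_pretrained("microsoft/udop-large")

image = Image.open("document.png").convert("RGB")  # hypothetical local scan

# OCR supplies words/boxes; the prompt is tokenized as the first sequence and the
# OCR words as `text_pair`, mirroring the `__call__` logic above.
inputs = processor(images=image, text="Question answering. What is the date?", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'bbox', 'input_ids', 'pixel_values']
```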
transformers/src/transformers/models/udop/processing_udop.py/0
{ "file_path": "transformers/src/transformers/models/udop/processing_udop.py", "repo_id": "transformers", "token_count": 3361 }
534
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/unispeech_sat/modular_unispeech_sat.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_unispeech_sat.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import warnings
from dataclasses import dataclass
from typing import Callable, Optional, Union

import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    CausalLMOutput,
    ModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
    Wav2Vec2BaseModelOutput,
    XVectorOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available, logging
from .configuration_unispeech_sat import UniSpeechSatConfig


if is_torch_flex_attn_available():
    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`UniSpeechSatForPreTraining`], with potential hidden states and attentions.
    """
)
class UniSpeechSatForPreTrainingOutput(ModelOutput):
    r"""
    loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the
        [official paper](https://arxiv.org/pdf/2006.11477.pdf).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`, *optional*):
        Prediction scores of the contrastive loss model, i.e. the output of the model before the final softmax.
    projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
        projected quantized states.
    projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
        target vectors for contrastive loss.
codevector_perplexity (`torch.FloatTensor` of shape `(1,)`): The perplexity of the codevector distribution, used to measure the diversity of the codebook. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None projected_states: Optional[torch.FloatTensor] = None projected_quantized_states: Optional[torch.FloatTensor] = None codevector_perplexity: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None class UniSpeechSatSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states class UniSpeechSatPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) if hasattr(self.conv, "parametrizations"): weight_g = self.conv.parametrizations.weight.original0 weight_v = self.conv.parametrizations.weight.original1 else: weight_g = self.conv.weight_g weight_v = self.conv.weight_v deepspeed.zero.register_external_parameter(self, weight_v) deepspeed.zero.register_external_parameter(self, weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = UniSpeechSatSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class UniSpeechSatNoLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class UniSpeechSatLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) 
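        # nn.LayerNorm normalizes over the last dimension, so the (batch, channels, time)
        # conv output is transposed to (batch, time, channels) before the norm and back
        # again right after.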
hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states class UniSpeechSatGroupNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class UniSpeechSatFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [UniSpeechSatGroupNormConvLayer(config, layer_id=0)] + [ UniSpeechSatNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ UniSpeechSatLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states class UniSpeechSatFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): if scaling is None: scaling = query.size(-1) ** -0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class UniSpeechSatAttention(nn.Module): 
"""Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[UniSpeechSatConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) current_states = key_value_states if is_cross_attention else hidden_states key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights, None class UniSpeechSatFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = 
self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class UniSpeechSatEncoderLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class UniSpeechSatEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechSatEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = self._update_full_mask( attention_mask, hidden_states, ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_full_mask( 
self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor, ): if attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & head_mask can not be supported when using SDPA, fall back to # the manual implementation that requires a 4D causal mask in all cases. # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask class UniSpeechSatAttnAdapterLayer(nn.Module): def __init__(self, config): """ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. """ super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class UniSpeechSatEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, "adapter_attn_dim", None) is not None: self.adapter_layer = UniSpeechSatAttnAdapterLayer(config) else: self.adapter_layer = None def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class UniSpeechSatEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [UniSpeechSatEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)] ) 
self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 attention_mask = self._update_full_mask( attention_mask, hidden_states, ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_full_mask( self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor, ): if attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & head_mask can not be supported when using SDPA, fall back to # the manual implementation that requires a 4D causal mask in all cases. # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask class UniSpeechSatGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information. 
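
    The feature dimension is split into `num_codevector_groups` groups; for each group, one of
    `num_codevectors_per_group` codewords is selected (Gumbel-softmax with hard sampling during training, argmax at
    inference) and the selected per-group codewords are concatenated to form the quantized vector.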
""" def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible " f"by `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs, mask=None): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax( hidden_states.float(), tau=self.temperature, hard=True ).type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity @auto_docstring class UniSpeechSatPreTrainedModel(PreTrainedModel): config: UniSpeechSatConfig base_model_prefix = "unispeech_sat" main_input_name = "input_values" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, UniSpeechSatGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, UniSpeechSatPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, UniSpeechSatFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, 
(nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask def _compute_mask_indices( shape: tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
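
    Returns:
        A boolean `np.ndarray` of shape `(batch_size, sequence_length)` where `True` marks the positions selected
        for masking.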
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.detach().sum(-1).tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask


UniSpeechSatBaseModelOutput = Wav2Vec2BaseModelOutput


@auto_docstring
class UniSpeechSatModel(UniSpeechSatPreTrainedModel):
    def __init__(self, config: UniSpeechSatConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = UniSpeechSatFeatureEncoder(config)
        self.feature_projection = UniSpeechSatFeatureProjection(config)

        self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())

        if config.do_stable_layer_norm:
            self.encoder = UniSpeechSatEncoderStableLayerNorm(config)
        else:
            self.encoder = UniSpeechSatEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, UniSpeechSatBaseModelOutput]: r""" mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return UniSpeechSatBaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" UniSpeechSat Model with a vector-quantization module and ctc loss for pre-training. 
""" ) class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) self.dropout_features = nn.Dropout(config.feat_quantizer_dropout) self.quantizer = UniSpeechSatGumbelVectorQuantizer(config) self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim) self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim) self.dropout = nn.Dropout(config.final_dropout) self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim) self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim)) self.label_embeddings_concat.data.zero_() self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if self.config.do_stable_layer_norm: self.layer_norm_for_extract.requires_grad = False # Initialize weights and apply final processing self.post_init() def set_gumbel_temperature(self, temperature: int): """ Set the Gumbel softmax temperature to a given value. Only necessary for training """ self.quantizer.temperature = temperature def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() @staticmethod def compute_contrastive_logits( target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int = 1, ): """ Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. 
""" target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1) logits = logits.type_as(target_features) # apply temperature logits = logits / temperature return logits @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, UniSpeechSatForPreTrainingOutput]: r""" Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining >>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base") >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base") >>> # TODO: Add full pretraining example ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) transformer_features = outputs[0] # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1]) # TODO(PVP) - add pretraining logic and add to tests logits = extract_features loss = quantized_features = codevector_perplexity = None if not return_dict: if loss is not None: return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return UniSpeechSatForPreTrainingOutput( loss=loss, logits=logits, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) _HIDDEN_STATES_START_POSITION = 2 @auto_docstring( custom_intro=""" UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). """ ) class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel): def __init__(self, config, target_lang: Optional[str] = None): r""" target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechSatForCTC`] with adapters. Uses 'eng' by default. """ super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `UniSpeechSatForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." 
            )
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """

        # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
        # correctly load adapter layers for UniSpeechSat so that we do not have to introduce a new API to
        # [`PreTrainedModel`]. While slightly hacky, UniSpeechSat never has to tie input and output embeddings, so it is
        # ok to repurpose this function here.
        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.unispeech_sat.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will
        not be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech_sat.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or
            equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
            labels in `[0, ..., config.vocab_size - 1]`.
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @auto_docstring( custom_intro=""" UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """ ) class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)" ) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.unispeech_sat.parameters(): param.requires_grad = False @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)" ) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels 
        self.init_weights()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.unispeech_sat.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will
        not be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech_sat.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, TokenClassifierOutput]:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio
            file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec
            library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super().__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss class TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if is_peft_available(): from peft.tuners.lora import LoraLayer if is_peft_available(): if isinstance(self.kernel, LoraLayer): warnings.warn( "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. " "You should exclude TDNNLayer from LoRA's target modules.", ) # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up hidden_states = hidden_states.transpose(1, 2) weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2) hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.activation(hidden_states) return hidden_states @auto_docstring( custom_intro=""" UniSpeechSat Model with an XVector feature extraction head on top for tasks like Speaker Verification. 
""" ) class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech_sat.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, XVectorOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "UniSpeechSatForAudioFrameClassification", "UniSpeechSatForCTC", "UniSpeechSatForPreTraining", "UniSpeechSatForSequenceClassification", "UniSpeechSatForXVector", "UniSpeechSatModel", "UniSpeechSatPreTrainedModel", ]
# coding=utf-8
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch VideoLlava model."""

from dataclasses import dataclass
from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import ModelOutput
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..auto import AutoModel
from .configuration_video_llava import VideoLlavaConfig


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for VideoLlava base model outputs.
    """
)
class VideoLlavaModelOutputWithPast(ModelOutput):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
        video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[list[torch.FloatTensor]] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
    video_hidden_states: Optional[torch.FloatTensor] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for VideoLlava causal language model (or autoregressive) outputs.
    """
)
class VideoLlavaCausalLMOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
        video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[list[torch.FloatTensor]] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
    video_hidden_states: Optional[torch.FloatTensor] = None


# Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->VideoLlava
class VideoLlavaMultiModalProjector(nn.Module):
    def __init__(self, config: VideoLlavaConfig):
        super().__init__()
        # We have hidden_size * the number of vision feature layers
        num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer)
        self.linear_1 = nn.Linear(
            config.vision_config.hidden_size * num_feature_layers,
            config.text_config.hidden_size,
            bias=config.multimodal_projector_bias,
        )
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(
            config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
        )

    def forward(self, image_features):
        hidden_states = self.linear_1(image_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states


@auto_docstring
class VideoLlavaPreTrainedModel(PreTrainedModel):
    config: VideoLlavaConfig
    base_model_prefix = ""
    supports_gradient_checkpointing = True
    _no_split_modules = ["VideoLlavaVisionAttention"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


@auto_docstring(
    custom_intro="""
    The VideoLlava model which consists of a vision backbone and a language model without language modeling head.
""", ) class VideoLlavaModel(VideoLlavaPreTrainedModel): _checkpoint_conversion_mapping = {"language_model.model": "language_model"} def __init__(self, config: VideoLlavaConfig): super().__init__(config) self.video_tower = AutoModel.from_config(config.vision_config) self.image_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = VideoLlavaMultiModalProjector(config) self.vocab_size = config.text_config.vocab_size self.language_model = AutoModel.from_config(config.text_config) self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_decoder(self, decoder): self.language_model = decoder def get_decoder(self): return self.language_model def get_image_features( self, pixel_values_images: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, ): """ Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values_images (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`) The tensors corresponding to the input images. vision_feature_layer (`Union[int, list[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"` Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`). """ vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) vision_feature_select_strategy = ( vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy ) if vision_feature_select_strategy not in ["default", "full"]: raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}") image_outputs = self.image_tower(pixel_values_images, output_hidden_states=True) # If we have one vision feature layer, return the corresponding hidden states, # otherwise, select the hidden states of each feature layer and concatenate them if isinstance(vision_feature_layer, int): image_outputs = image_outputs.hidden_states[vision_feature_layer] if vision_feature_select_strategy == "default": image_outputs = image_outputs[:, 1:] else: hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer] # For default; crop CLS from each hidden state in the hidden state pool if vision_feature_select_strategy == "default": hs_pool = [hs[:, 1:] for hs in hs_pool] image_outputs = torch.cat(hs_pool, dim=-1) image_features = self.multi_modal_projector(image_outputs) return image_features def get_video_features( self, pixel_values_videos: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, ): """ Obtains video last hidden states from the vision tower and apply multimodal projection. Args: pixel_values_videos (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`) The tensors corresponding to the input videos. 
            vision_feature_layer (`Union[int, list[int]]`, *optional*):
                The index of the layer to select the vision feature. If multiple indices are provided,
                the vision feature of the corresponding indices will be concatenated to form the
                vision features.
        Returns:
            video_features (`torch.Tensor`): Video feature tensor of shape
                `(num_videos * num_frames, image_length, embed_dim)`.
            frames (`int`): Number of frames the videos have.
        """
        vision_feature_layer = (
            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        )

        batch_size_vid, num_frames, channels, height, width = pixel_values_videos.shape
        pixel_values = pixel_values_videos.reshape(batch_size_vid * num_frames, channels, height, width)

        video_outputs = self.video_tower(pixel_values, output_hidden_states=True)

        # If we have one vision feature layer, return the corresponding hidden states,
        # otherwise, select the hidden states of each feature layer and concatenate them
        if isinstance(vision_feature_layer, int):
            video_features = video_outputs.hidden_states[vision_feature_layer]
        else:
            hs_pool = [video_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
            video_features = torch.cat(hs_pool, dim=-1)

        video_features = self.multi_modal_projector(video_features)

        return video_features, num_frames

    def get_placeholder_mask(
        self,
        input_ids: torch.LongTensor,
        inputs_embeds: torch.FloatTensor,
        image_features: torch.FloatTensor = None,
        video_features: torch.FloatTensor = None,
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder
        token count is equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
            special_video_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_video_mask = special_video_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
            special_video_mask = input_ids == self.config.video_token_id

        n_image_tokens = special_image_mask.sum()
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
            raise ValueError(
                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0] * image_features.shape[1]}"
            )

        n_video_tokens = special_video_mask.sum()
        special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
            raise ValueError(
                f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0] * video_features.shape[1]}"
            )
        return special_image_mask, special_video_mask

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values_images: torch.FloatTensor = None,
        pixel_values_videos: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layer: Optional[Union[int, list[int]]] = None,
        vision_feature_select_strategy: Optional[str] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[tuple, VideoLlavaModelOutputWithPast]:
        r"""
        pixel_values_images (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images. Pixel values can be obtained using [`AutoImageProcessor`].
            See [`VideoLlavaImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
            [`VideoLlavaImageProcessor`] for processing images).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layer = (
            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        )
        vision_feature_select_strategy = (
            vision_feature_select_strategy
            if vision_feature_select_strategy is not None
            else self.config.vision_feature_select_strategy
        )

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values_images is not None:
            image_features = self.get_image_features(
                pixel_values_images,
                vision_feature_layer=vision_feature_layer,
                vision_feature_select_strategy=vision_feature_select_strategy,
            )
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        if pixel_values_videos is not None:
            video_features, num_frames = self.get_video_features(
                pixel_values_videos=pixel_values_videos, vision_feature_layer=vision_feature_layer
            )
            video_features = video_features.to(inputs_embeds.device, inputs_embeds.dtype)
            _, special_video_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, video_features=video_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return VideoLlavaModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values_images is not None else None,
            video_hidden_states=video_features if pixel_values_videos is not None else None,
        )


@auto_docstring(
    custom_intro="""
    The VideoLlava model which consists of a vision backbone and a language model.
""" ) class VideoLlavaForConditionalGeneration(VideoLlavaPreTrainedModel, GenerationMixin): _checkpoint_conversion_mapping = { "^language_model.model": "model.language_model", "^image_tower": "model.image_tower", "^video_tower": "model.video_tower", "^multi_modal_projector": "model.multi_modal_projector", "^language_model.lm_head": "lm_head", } _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: VideoLlavaConfig): super().__init__(config) self.model = VideoLlavaModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def get_output_embeddings(self) -> nn.Module: return self.lm_head def set_decoder(self, decoder): self.model.set_decoder(decoder) def get_decoder(self): return self.model.get_decoder() def get_image_features( self, pixel_values_images: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, ): return self.model.get_image_features( pixel_values_images=pixel_values_images, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, ) # Make modules available through conditional class for BC @property def language_model(self): return self.model.language_model @property def video_tower(self): return self.model.video_tower @property def image_tower(self): return self.model.image_tower @property def multi_modal_projector(self): return self.model.multi_modal_projector @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values_images: torch.FloatTensor = None, pixel_values_videos: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, VideoLlavaCausalLMOutputWithPast]: r""" pixel_values_images (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. Pixel values can be obtained using [`AutoImageProcessor`]. See [`VideoLlavaImageProcessor.__call__`] for details ([]`LlavaProcessor`] uses [`VideoLlavaImageProcessor`] for processing images). labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> import numpy as np >>> import av >>> from huggingface_hub import hf_hub_download >>> from transformers import VideoLlavaProcessor, VideoLlavaForConditionalGeneration >>> def read_video_pyav(container, indices): ... ''' ... 
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])

        >>> model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")
        >>> processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf")

        >>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
        >>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
        >>> container = av.open(video_path)

        >>> # sample uniformly 8 frames from the video
        >>> total_frames = container.streams.video[0].frames
        >>> indices = np.arange(0, total_frames, total_frames / 8).astype(int)
        >>> clip = read_video_pyav(container, indices)

        >>> inputs = processor(text=prompt, videos=clip, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_length=80)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "USER: Why is this video funny? ASSISTANT: The video is funny because the baby is playing with a Wii remote while sitting on the floor, and the baby is wearing glasses.Ъ. The baby's actions are amusing because it is a young child trying to interact with a video game, which is not a typical activity for a"

        >>> # to generate from image and video mix
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> prompt = [
        ...     "USER: <image>\nHow many cats do you see? ASSISTANT:",
        ...     "USER: <video>\nWhy is this video funny? ASSISTANT:"
        ... ]
        >>> inputs = processor(text=prompt, images=image, videos=clip, padding=True, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_length=50)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        ['USER: How many cats do you see? ASSISTANT: There are two cats visible in the image. (or three, if you count the one in the background).', 'USER: Why is this video funny? ASSISTANT: The video is funny because it shows a baby sitting on a bed and playing with a Wii remote.Ъ.
The baby is holding the remote'] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) vision_feature_select_strategy = ( vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy ) outputs = self.model( input_ids=input_ids, pixel_values_images=pixel_values_images, pixel_values_videos=pixel_values_videos, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return VideoLlavaCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, video_hidden_states=outputs.video_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values_images=None, pixel_values_videos=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) if cache_position[0] == 0: # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model model_inputs["pixel_values_images"] = pixel_values_images model_inputs["pixel_values_videos"] = pixel_values_videos return model_inputs @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. 
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


__all__ = ["VideoLlavaPreTrainedModel", "VideoLlavaModel", "VideoLlavaForConditionalGeneration"]
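# Illustrative sketch of what `_prepare_4d_causal_attention_mask_with_cache_position`
# computes, traced on toy shapes. All sizes below are invented for the example;
# nothing here touches VideoLlava weights or the class above.
import torch

sequence_length, target_length, batch_size = 4, 6, 1
dtype = torch.float32
cache_position = torch.arange(sequence_length)  # positions of the new tokens

min_dtype = torch.finfo(dtype).min
# start from a fully masked (seq, target) grid ...
demo_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
# ... keep only the strictly-upper triangle masked (causality) ...
demo_mask = torch.triu(demo_mask, diagonal=1)
# ... and re-mask static-cache slots lying in the future of each cache position
demo_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
# broadcast to (batch, 1, seq, target) as the model expects
demo_mask = demo_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
print(demo_mask)  # 0.0 where attention is allowed, dtype-min elsewhere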
transformers/src/transformers/models/video_llava/modeling_video_llava.py/0
{ "file_path": "transformers/src/transformers/models/video_llava/modeling_video_llava.py", "repo_id": "transformers", "token_count": 13778 }
536
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for ViLT.
"""

import warnings
from typing import Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor.

    [`ViltProcessor`] offers all the functionalities of [`ViltImageProcessor`] and [`BertTokenizerFast`]. See the
    docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.

    Args:
        image_processor (`ViltImageProcessor`, *optional*):
            An instance of [`ViltImageProcessor`]. The image processor is a required input.
        tokenizer (`BertTokenizerFast`, *optional*):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This method uses [`ViltImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.
""" encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) # add pixel_values + pixel_mask encoding_image_processor = self.image_processor(images, return_tensors=return_tensors) encoding.update(encoding_image_processor) return encoding @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor __all__ = ["ViltProcessor"]
transformers/src/transformers/models/vilt/processing_vilt.py/0
{ "file_path": "transformers/src/transformers/models/vilt/processing_vilt.py", "repo_id": "transformers", "token_count": 2065 }
537
# coding=utf-8
# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ViT MAE model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ViTMAEConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ViTMAEModel`]. It is used to instantiate a ViT
    MAE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the ViT
    [facebook/vit-mae-base](https://huggingface.co/facebook/vit-mae-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        decoder_num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the decoder.
        decoder_hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the decoder.
        decoder_num_hidden_layers (`int`, *optional*, defaults to 8):
            Number of hidden layers in the decoder.
        decoder_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
mask_ratio (`float`, *optional*, defaults to 0.75): The ratio of the number of masked tokens in the input sequence. norm_pix_loss (`bool`, *optional*, defaults to `False`): Whether or not to train with normalized pixels (see Table 3 in the paper). Using normalized pixels improved representation quality in the experiments of the authors. Example: ```python >>> from transformers import ViTMAEConfig, ViTMAEModel >>> # Initializing a ViT MAE vit-mae-base style configuration >>> configuration = ViTMAEConfig() >>> # Initializing a model (with random weights) from the vit-mae-base style configuration >>> model = ViTMAEModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vit_mae" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.mask_ratio = mask_ratio self.norm_pix_loss = norm_pix_loss __all__ = ["ViTMAEConfig"]
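# Quick arithmetic behind `mask_ratio` with the defaults above: a 224x224 image
# cut into 16x16 patches gives (224 // 16) ** 2 = 196 patches, of which 75% are
# masked during pre-training, so the encoder only sees the remaining 49.
from transformers import ViTMAEConfig

config = ViTMAEConfig()
num_patches = (config.image_size // config.patch_size) ** 2
num_masked = int(config.mask_ratio * num_patches)
print(num_patches, num_masked, num_patches - num_masked)  # 196 147 49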
transformers/src/transformers/models/vit_mae/configuration_vit_mae.py/0
{ "file_path": "transformers/src/transformers/models/vit_mae/configuration_vit_mae.py", "repo_id": "transformers", "token_count": 2418 }
538
# coding=utf-8 # Copyright 2023 HUST-VL and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ViTMatte model.""" from dataclasses import dataclass from typing import Optional import torch from torch import nn from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring from ...utils.backbone_utils import load_backbone from .configuration_vitmatte import VitMatteConfig @dataclass @auto_docstring( custom_intro=""" Class for outputs of image matting models. """ ) class ImageMattingOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Loss. alphas (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Estimated alpha values. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ loss: Optional[torch.FloatTensor] = None alphas: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring class VitMattePreTrainedModel(PreTrainedModel): config: VitMatteConfig main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = [] def _init_weights(self, module: nn.Module): if isinstance(module, (nn.Conv2d, nn.BatchNorm2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() class VitMatteBasicConv3x3(nn.Module): """ Basic convolution layers including: Conv3x3, BatchNorm2d, ReLU layers. """ def __init__(self, config, in_channels, out_channels, stride=2, padding=1): super().__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, bias=False, ) self.batch_norm = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps) self.relu = nn.ReLU() def forward(self, hidden_state): hidden_state = self.conv(hidden_state) hidden_state = self.batch_norm(hidden_state) hidden_state = self.relu(hidden_state) return hidden_state class VitMatteConvStream(nn.Module): """ Simple ConvStream containing a series of basic conv3x3 layers to extract detail features. """ def __init__(self, config): super().__init__() # We use a default in-case there isn't a backbone config set. This is for backwards compatibility and # to enable loading HF backbone models. 
in_channels = 4 if config.backbone_config is not None: in_channels = config.backbone_config.num_channels out_channels = config.convstream_hidden_sizes self.convs = nn.ModuleList() self.conv_chans = [in_channels] + out_channels for i in range(len(self.conv_chans) - 1): in_chan_ = self.conv_chans[i] out_chan_ = self.conv_chans[i + 1] self.convs.append(VitMatteBasicConv3x3(config, in_chan_, out_chan_)) def forward(self, pixel_values): out_dict = {"detailed_feature_map_0": pixel_values} embeddings = pixel_values for i in range(len(self.convs)): embeddings = self.convs[i](embeddings) name_ = "detailed_feature_map_" + str(i + 1) out_dict[name_] = embeddings return out_dict class VitMatteFusionBlock(nn.Module): """ Simple fusion block to fuse features from ConvStream and Plain Vision Transformer. """ def __init__(self, config, in_channels, out_channels): super().__init__() self.conv = VitMatteBasicConv3x3(config, in_channels, out_channels, stride=1, padding=1) def forward(self, features, detailed_feature_map): upscaled_features = nn.functional.interpolate(features, scale_factor=2, mode="bilinear", align_corners=False) out = torch.cat([detailed_feature_map, upscaled_features], dim=1) out = self.conv(out) return out class VitMatteHead(nn.Module): """ Simple Matting Head, containing only conv3x3 and conv1x1 layers. """ def __init__(self, config): super().__init__() in_channels = config.fusion_hidden_sizes[-1] mid_channels = 16 self.matting_convs = nn.Sequential( nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(mid_channels), nn.ReLU(True), nn.Conv2d(mid_channels, 1, kernel_size=1, stride=1, padding=0), ) def forward(self, hidden_state): hidden_state = self.matting_convs(hidden_state) return hidden_state class VitMatteDetailCaptureModule(nn.Module): """ Simple and lightweight Detail Capture Module for ViT Matting. """ def __init__(self, config): super().__init__() if len(config.fusion_hidden_sizes) != len(config.convstream_hidden_sizes) + 1: raise ValueError( "The length of fusion_hidden_sizes should be equal to the length of convstream_hidden_sizes + 1." ) self.config = config self.convstream = VitMatteConvStream(config) self.conv_chans = self.convstream.conv_chans self.fusion_blocks = nn.ModuleList() self.fusion_channels = [config.hidden_size] + config.fusion_hidden_sizes for i in range(len(self.fusion_channels) - 1): self.fusion_blocks.append( VitMatteFusionBlock( config=config, in_channels=self.fusion_channels[i] + self.conv_chans[-(i + 1)], out_channels=self.fusion_channels[i + 1], ) ) self.matting_head = VitMatteHead(config) def forward(self, features, pixel_values): detail_features = self.convstream(pixel_values) for i in range(len(self.fusion_blocks)): detailed_feature_map_name = "detailed_feature_map_" + str(len(self.fusion_blocks) - i - 1) features = self.fusion_blocks[i](features, detail_features[detailed_feature_map_name]) alphas = torch.sigmoid(self.matting_head(features)) return alphas @auto_docstring( custom_intro=""" ViTMatte framework leveraging any vision backbone e.g. for ADE20k, CityScapes. 
""" ) class VitMatteForImageMatting(VitMattePreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.backbone = load_backbone(config) self.decoder = VitMatteDetailCaptureModule(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth image matting for computing the loss. Examples: ```python >>> from transformers import VitMatteImageProcessor, VitMatteForImageMatting >>> import torch >>> from PIL import Image >>> from huggingface_hub import hf_hub_download >>> processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k") >>> model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset" ... ) >>> image = Image.open(filepath).convert("RGB") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset" ... ) >>> trimap = Image.open(filepath).convert("L") >>> # prepare image + trimap for the model >>> inputs = processor(images=image, trimaps=trimap, return_tensors="pt") >>> with torch.no_grad(): ... alphas = model(**inputs).alphas >>> print(alphas.shape) torch.Size([1, 1, 640, 960]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions loss = None if labels is not None: raise NotImplementedError("Training is not yet supported") outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) features = outputs.feature_maps[-1] alphas = self.decoder(features, pixel_values) if not return_dict: output = (alphas,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageMattingOutput( loss=loss, alphas=alphas, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["VitMattePreTrainedModel", "VitMatteForImageMatting"]
transformers/src/transformers/models/vitmatte/modeling_vitmatte.py/0
{ "file_path": "transformers/src/transformers/models/vitmatte/modeling_vitmatte.py", "repo_id": "transformers", "token_count": 4418 }
539
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Flax ViViT checkpoints from the original repository to PyTorch. URL: https://github.com/google-research/scenic/tree/main/scenic/projects/vivit """ import argparse import json import os.path from collections import OrderedDict import numpy as np import requests import torch from flax.training.checkpoints import restore_checkpoint from huggingface_hub import hf_hub_download from transformers import VivitConfig, VivitForVideoClassification, VivitImageProcessor from transformers.image_utils import PILImageResampling def download_checkpoint(path): url = "https://storage.googleapis.com/scenic-bucket/vivit/kinetics_400/vivit_base_16x2_unfactorized/checkpoint" with open(path, "wb") as f: with requests.get(url, stream=True) as req: for chunk in req.iter_content(chunk_size=2048): f.write(chunk) def get_vivit_config() -> VivitConfig: config = VivitConfig() config.num_labels = 400 repo_id = "huggingface/label-files" filename = "kinetics400-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # We will verify our results on a video of eating spaghetti # Frame indices used: [ 47, 51, 55, 59, 63, 67, 71, 75, 80, 84, 88, 92, 96, 100, 104, 108, 113, 117, # 121, 125, 129, 133, 137, 141, 146, 150, 154, 158, 162, 166, 170, 174] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) def transform_attention(current: np.ndarray): if np.ndim(current) == 2: return transform_attention_bias(current) elif np.ndim(current) == 3: return transform_attention_kernel(current) else: raise Exception(f"Invalid number of dimensions: {np.ndim(current)}") def transform_attention_bias(current: np.ndarray): return current.flatten() def transform_attention_kernel(current: np.ndarray): return np.reshape(current, (current.shape[0], current.shape[1] * current.shape[2])).T def transform_attention_output_weight(current: np.ndarray): return np.reshape(current, (current.shape[0] * current.shape[1], current.shape[2])).T def transform_state_encoder_block(state_dict, i): state = state_dict["optimizer"]["target"]["Transformer"][f"encoderblock_{i}"] prefix = f"encoder.layer.{i}." 
new_state = { prefix + "intermediate.dense.bias": state["MlpBlock_0"]["Dense_0"]["bias"], prefix + "intermediate.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_0"]["kernel"]), prefix + "output.dense.bias": state["MlpBlock_0"]["Dense_1"]["bias"], prefix + "output.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_1"]["kernel"]), prefix + "layernorm_before.bias": state["LayerNorm_0"]["bias"], prefix + "layernorm_before.weight": state["LayerNorm_0"]["scale"], prefix + "layernorm_after.bias": state["LayerNorm_1"]["bias"], prefix + "layernorm_after.weight": state["LayerNorm_1"]["scale"], prefix + "attention.attention.query.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["query"]["bias"] ), prefix + "attention.attention.query.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["query"]["kernel"] ), prefix + "attention.attention.key.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["key"]["bias"] ), prefix + "attention.attention.key.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["key"]["kernel"] ), prefix + "attention.attention.value.bias": transform_attention( state["MultiHeadDotProductAttention_0"]["value"]["bias"] ), prefix + "attention.attention.value.weight": transform_attention( state["MultiHeadDotProductAttention_0"]["value"]["kernel"] ), prefix + "attention.output.dense.bias": state["MultiHeadDotProductAttention_0"]["out"]["bias"], prefix + "attention.output.dense.weight": transform_attention_output_weight( state["MultiHeadDotProductAttention_0"]["out"]["kernel"] ), } return new_state def get_n_layers(state_dict): return sum([1 if "encoderblock_" in k else 0 for k in state_dict["optimizer"]["target"]["Transformer"]]) def transform_state(state_dict, classification_head=False): transformer_layers = get_n_layers(state_dict) new_state = OrderedDict() new_state["layernorm.bias"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["bias"] new_state["layernorm.weight"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["scale"] new_state["embeddings.patch_embeddings.projection.weight"] = np.transpose( state_dict["optimizer"]["target"]["embedding"]["kernel"], (4, 3, 0, 1, 2) ) new_state["embeddings.patch_embeddings.projection.bias"] = state_dict["optimizer"]["target"]["embedding"]["bias"] new_state["embeddings.cls_token"] = state_dict["optimizer"]["target"]["cls"] new_state["embeddings.position_embeddings"] = state_dict["optimizer"]["target"]["Transformer"]["posembed_input"][ "pos_embedding" ] for i in range(transformer_layers): new_state.update(transform_state_encoder_block(state_dict, i)) if classification_head: new_state = {"vivit." 
+ k: v for k, v in new_state.items()} new_state["classifier.weight"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["kernel"]) new_state["classifier.bias"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["bias"]) return {k: torch.tensor(v) for k, v in new_state.items()} # checks that image processor settings are the same as in the original implementation # original: https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/data/video_tfrecord_dataset.py # dataset specific config: # https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/configs/kinetics400/vivit_base_k400.py def get_processor() -> VivitImageProcessor: extractor = VivitImageProcessor() assert extractor.do_resize is True assert extractor.size == {"shortest_edge": 256} assert extractor.do_center_crop is True assert extractor.crop_size == {"width": 224, "height": 224} assert extractor.resample == PILImageResampling.BILINEAR # here: https://github.com/deepmind/dmvr/blob/master/dmvr/modalities.py # one can seen that add_image has default values for normalization_mean and normalization_std set to 0 and 1 # which effectively means no normalization (and ViViT does not overwrite those when calling this func) assert extractor.do_normalize is False assert extractor.do_rescale is True assert extractor.rescale_factor == 1 / 255 # zero-centering = True in original implementation assert extractor.do_zero_centering is True return extractor def convert(output_path: str): flax_model_path = "checkpoint" if not os.path.exists(flax_model_path): download_checkpoint(flax_model_path) state_dict = restore_checkpoint(flax_model_path, None) new_state = transform_state(state_dict, classification_head=True) config = get_vivit_config() assert config.image_size == 224 assert config.num_frames == 32 model = VivitForVideoClassification(config) model.load_state_dict(new_state) model.eval() extractor = get_processor() video = prepare_video() inputs = extractor(video, return_tensors="pt") outputs = model(**inputs) expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([-1.0543, 2.0764, -0.2104, 0.4439, -0.9658]) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4), outputs.logits[0, :5] model.save_pretrained(output_path) extractor.save_pretrained(output_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--output_model_name", "-o", type=str, help="Output path for the converted HuggingFace model") args = parser.parse_args() convert(args.output_model_name)
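# What `transform_attention_kernel` above does, shown on toy shapes: Flax stores
# each q/k/v kernel as (hidden, num_heads, head_dim), while PyTorch's nn.Linear
# expects a transposed (out_features, in_features) = (hidden, hidden) matrix.
# Sizes below are invented for the example.
import numpy as np

hidden, num_heads, head_dim = 8, 2, 4  # num_heads * head_dim == hidden
flax_kernel = np.random.rand(hidden, num_heads, head_dim)
torch_weight = np.reshape(flax_kernel, (hidden, num_heads * head_dim)).T
print(torch_weight.shape)  # (8, 8), ready for `attention.query.weight` and friends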
transformers/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py", "repo_id": "transformers", "token_count": 3445 }
540
# coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wav2Vec2Conformer model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Wav2Vec2ConformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Wav2Vec2ConformerModel`]. It is used to
    instantiate a Wav2Vec2Conformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Wav2Vec2Conformer
    [facebook/wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*):
            Vocabulary size of the Wav2Vec2Conformer model. Defines the number of different tokens that can be
            represented by the `input_ids` passed when calling [`Wav2Vec2ConformerModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Wav2Vec2ConformerForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more
            details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for quantized feature encoder states.
        conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of 1D convolutional positional embeddings layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://huggingface.co/papers/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease
            the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespectively of `mask_time_prob`.
            Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks''
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks
            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
            overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
        num_codevectors_per_group (`int`, *optional*, defaults to 320):
            Number of entries in each quantization codebook (group).
        num_codevector_groups (`int`, *optional*, defaults to 2):
            Number of codevector groups for product codevector quantization.
        contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
            The temperature *kappa* in the contrastive loss.
        num_negatives (`int`, *optional*, defaults to 100):
            Number of negative samples for the contrastive loss.
        codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the quantized feature vectors.
        proj_codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
            The weight of the codebook diversity loss component.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`Wav2Vec2ConformerForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`Wav2Vec2ConformerForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`Wav2Vec2ConformerForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. add_adapter (`bool`, *optional*, defaults to `False`): Whether a convolutional network should be stacked on top of the Wav2Vec2Conformer Encoder. Can be very useful for warm-starting Wav2Vec2Conformer for SpeechEncoderDecoder models. adapter_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. adapter_stride (`int`, *optional*, defaults to 2): Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. num_adapter_layers (`int`, *optional*, defaults to 3): Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is True`. output_hidden_size (`int`, *optional*): Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant if `add_adapter is True`. position_embeddings_type (`str`, *optional*, defaults to `"relative"`): Can be specified to `relative` or `rotary` for relative or rotary position embeddings respectively. If left `None` no relative position embedding is applied. rotary_embedding_base (`int`, *optional*, defaults to 10000): If `"rotary"` position embeddings are used, defines the size of the embedding base. max_source_positions (`int`, *optional*, defaults to 5000): if `"relative"` position embeddings are used, defines the maximum source input positions. conv_depthwise_kernel_size (`int`, *optional*, defaults to 31): Kernel size of convolutional depthwise 1D layer in Conformer blocks. conformer_conv_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all convolutional layers in Conformer blocks. 
Example: ```python >>> from transformers import Wav2Vec2ConformerConfig, Wav2Vec2ConformerModel >>> # Initializing a Wav2Vec2Conformer facebook/wav2vec2-conformer-rel-pos-large style configuration >>> configuration = Wav2Vec2ConformerConfig() >>> # Initializing a model (with random weights) from the facebook/wav2vec2-conformer-rel-pos-large style configuration >>> model = Wav2Vec2ConformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "wav2vec2-conformer" def __init__( self, vocab_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, position_embeddings_type="relative", rotary_embedding_base=10000, max_source_positions=5000, conv_depthwise_kernel_size=31, conformer_conv_dropout=0.1, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.use_weighted_layer_sum = use_weighted_layer_sum self.max_source_positions = max_source_positions self.position_embeddings_type = position_embeddings_type self.rotary_embedding_base = rotary_embedding_base if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # Conformer-block related self.conv_depthwise_kernel_size = conv_depthwise_kernel_size self.conformer_conv_dropout = conformer_conv_dropout # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # parameters for pretraining with codevector quantized representations self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # adapter self.add_adapter = add_adapter self.adapter_kernel_size = adapter_kernel_size self.adapter_stride = adapter_stride self.num_adapter_layers = num_adapter_layers self.output_hidden_size = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) __all__ = ["Wav2Vec2ConformerConfig"]
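# What `inputs_to_logits_ratio` above evaluates to: the product of the feature
# encoder strides. With the defaults (5, 2, 2, 2, 2, 2, 2) one logit frame is
# produced per 5 * 2**6 = 320 input samples, i.e. every 20 ms of 16 kHz audio.
from transformers import Wav2Vec2ConformerConfig

config = Wav2Vec2ConformerConfig()
print(config.inputs_to_logits_ratio)           # 320
print(config.inputs_to_logits_ratio / 16_000)  # 0.02 s of audio per logit frame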
transformers/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py", "repo_id": "transformers", "token_count": 8158 }
541
#!/usr/bin/env python """Converts a Whisper model in OpenAI format to Hugging Face format.""" # Copyright 2022 The HuggingFace Inc. team and the OpenAI team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import io import json import os import tempfile import urllib import warnings from typing import Any, Optional import torch from huggingface_hub.utils import insecure_hashlib from torch import nn from tqdm import tqdm from transformers import ( GenerationConfig, WhisperConfig, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizer, WhisperTokenizerFast, ) from transformers.models.whisper.tokenization_whisper import LANGUAGES, bytes_to_unicode from transformers.utils.import_utils import _is_package_available _MODELS = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", } _TOKENIZERS = { "multilingual": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/multilingual.tiktoken", "english": "https://raw.githubusercontent.com/openai/whisper/main/whisper/assets/gpt2.tiktoken", } def _get_generation_config( is_multilingual: bool, num_languages: int = 100, openai_version: Optional[str] = None, ) -> GenerationConfig: """ Loads the appropriate generation config from HF repo """ if openai_version is not None: repo = f"openai/whisper-{openai_version}" elif not is_multilingual: repo = "openai/whisper-medium.en" elif num_languages < 100: repo = "openai/whisper-large-v2" else: repo = "openai/whisper-large-v3" gen_cfg = 
GenerationConfig.from_pretrained(repo)
    if openai_version is None:
        gen_cfg.alignment_heads = None
        warnings.warn(
            "Alignment heads have not been included in the generation config, since they are available "
            "only for the original OpenAI checkpoints. "
            "If you want to use word-level timestamps with a custom version of Whisper, "
            "see https://github.com/openai/whisper/blob/main/notebooks/Multilingual_ASR.ipynb "
            "for the example of how to produce word-level timestamps manually."
        )

    return gen_cfg


def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    # in_features=emb_size, out_features=vocab_size, so the Linear weight has shape
    # (vocab_size, emb_size) and matches the embedding weight assigned below
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str) -> Any:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if insecure_hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return torch.load(io.BytesIO(model_bytes), weights_only=True)
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if insecure_hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return torch.load(io.BytesIO(model_bytes), weights_only=True)


def convert_openai_whisper_to_tfms(
    checkpoint_path, pytorch_dump_folder_path
) -> tuple[WhisperForConditionalGeneration, bool, int]:
    if ".pt" not in checkpoint_path:
        root = os.path.dirname(pytorch_dump_folder_path) or "."
        original_checkpoint = _download(_MODELS[checkpoint_path], root)
        openai_version = checkpoint_path
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
        openai_version = None

    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    # a hacky way to properly set up the bos/eos/pad token ids in the model
    endoftext_id = 50257 if dimensions["n_vocab"] > 51865 else 50256

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
        eos_token_id=endoftext_id,
        bos_token_id=endoftext_id,
        pad_token_id=endoftext_id,
        decoder_start_token_id=endoftext_id + 1,
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weight",
        "decoder.embed_positions.weight",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weight` and `decoder.embed_positions.weight` are allowed to be missing,"
            f" but the following weights are missing: {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    # determine those parameters from the model checkpoint, as the Whisper repo does
    is_multilingual = model.config.vocab_size >= 51865
    num_languages = model.config.vocab_size - 51765 - int(is_multilingual)

    model.generation_config = _get_generation_config(
        is_multilingual,
        num_languages,
        openai_version,
    )

    return model, is_multilingual, num_languages


# Adapted from https://github.com/openai/tiktoken/issues/60#issuecomment-1499977960
def _bpe(mergeable_ranks, token: bytes, max_rank=None) -> list[bytes]:
    parts = [bytes([b]) for b in token]
    while True:
        min_idx = None
        min_rank = None
        for i, pair in enumerate(zip(parts[:-1], parts[1:])):
            rank = mergeable_ranks.get(pair[0] + pair[1])
            if rank is not None and (min_rank is None or rank < min_rank):
                min_idx = i
                min_rank = rank
        if min_rank is None or (max_rank is not None and min_rank >= max_rank):
            break
        assert min_idx is not None
        parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2 :]
    return parts


def convert_tiktoken_bpe_to_hf(tiktoken_url: str):
    bpe_ranks = load_tiktoken_bpe(tiktoken_url)
    byte_encoder = bytes_to_unicode()

    def token_bytes_to_string(b):
        return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")])

    merges = []
    vocab = {}
    for token, rank in bpe_ranks.items():
        vocab[token_bytes_to_string(token)] = rank
        if len(token) == 1:
            continue
        merged = tuple(_bpe(bpe_ranks, token, max_rank=rank))
        if len(merged) == 2:  # account for empty token
            merges.append(" ".join(map(token_bytes_to_string, merged)))
    return vocab, merges


def convert_tiktoken_to_hf(
    multilingual: bool = True, num_languages: int = 100, time_precision=0.02
) -> WhisperTokenizer:
    # requires whisper, unless we use the path to the tiktoken file
    tiktoken_tokenizer_path = 
_TOKENIZERS["multilingual" if multilingual else "english"] start_of_transcript = ["<|endoftext|>", "<|startoftranscript|>"] control_tokens = [ "<|translate|>", "<|transcribe|>", "<|startoflm|>", "<|startofprev|>", "<|nospeech|>", "<|notimestamps|>", ] # these are special tokens, not normalized language_tokens = [f"<|{k}|>" for k in list(LANGUAGES)[:num_languages]] # These are not special but normalized timestamp_tokens = [("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)] vocab, merges = convert_tiktoken_bpe_to_hf(tiktoken_tokenizer_path) with tempfile.TemporaryDirectory() as tmpdirname: vocab_file = f"{tmpdirname}/vocab.json" merge_file = f"{tmpdirname}/merges.txt" with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens in merges: writer.write(bpe_tokens + "\n") hf_tokenizer = WhisperTokenizer(vocab_file, merge_file) hf_tokenizer.add_tokens(start_of_transcript + language_tokens + control_tokens, special_tokens=True) hf_tokenizer.add_tokens(timestamp_tokens, special_tokens=False) return hf_tokenizer if __name__ == "__main__": parser = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--convert_preprocessor", type=bool, default=False, help="Whether or not the preprocessor (tokenizer + feature extractor) should be converted along with the model.", ) args = parser.parse_args() model, is_multilingual, num_languages = convert_openai_whisper_to_tfms( args.checkpoint_path, args.pytorch_dump_folder_path ) if args.convert_preprocessor: try: if not _is_package_available("tiktoken"): raise ModuleNotFoundError( """`tiktoken` is not installed, use `pip install tiktoken` to convert the tokenizer""" ) except Exception as e: print(e) else: from tiktoken.load import load_tiktoken_bpe tokenizer = convert_tiktoken_to_hf(is_multilingual, num_languages) feature_extractor = WhisperFeatureExtractor( feature_size=model.config.num_mel_bins, # the rest of default parameters are the same as hardcoded in openai/whisper ) processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(args.pytorch_dump_folder_path) # save fast tokenizer as well fast_tokenizer = WhisperTokenizerFast.from_pretrained(args.pytorch_dump_folder_path) fast_tokenizer.save_pretrained(args.pytorch_dump_folder_path, legacy_format=False) model.save_pretrained(args.pytorch_dump_folder_path)
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xcodec model configuration"""

import math
from typing import Optional, Union

import numpy as np

from transformers import AutoConfig, DacConfig, HubertConfig, WavLMConfig

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class XcodecConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`XcodecModel`]. It is used to instantiate an
    Xcodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [Manel/X-Codec](https://huggingface.co/Manel/X-Codec) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        target_bandwidths (`List[float]`, *optional*, defaults to `[0.5, 1, 1.5, 2, 4]`):
            The range of different bandwidths (in kbps) the model can encode audio with.
        sample_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio waveform should be digitized, in hertz (Hz).
        kernel_size (`int`, *optional*, defaults to 3):
            Kernel size for the initial semantic convolution.
        channel_ratios (`List[float]`, *optional*, defaults to `[1, 1]`):
            Expansion factors for the number of output channels in each semantic block.
        strides (`List[int]`, *optional*, defaults to `[1, 1]`):
            Strides for each semantic encoder block.
        block_dilations (`List[int]`, *optional*, defaults to `[1, 1]`):
            Dilation factors for the residual units in semantic blocks.
        unit_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size inside each ResidualUnit in semantic blocks.
        codebook_size (`int`, *optional*, defaults to 1024):
            Number of entries in each residual quantizer's codebook.
        codebook_dim (`int`, *optional*):
            Dimensionality of each codebook vector. Defaults to the sum of the hidden sizes of the acoustic and
            semantic models.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for all weight matrices.
        acoustic_model_config (`Union[Dict, DacConfig]`, *optional*):
            An instance of the configuration for the acoustic (DAC) model.
        semantic_model_config (`Union[Dict, HubertConfig, WavLMConfig]`, *optional*):
            An instance of the configuration object for the semantic (HuBERT) model.
Example: ```python >>> from transformers import XcodecModel, XcodecConfig >>> # Initializing configuration >>> configuration = XcodecConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = XcodecModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xcodec" sub_configs = { "acoustic_model_config": DacConfig, "semantic_model_config": AutoConfig, } def __init__( self, target_bandwidths: Optional[list[float]] = None, sample_rate: int = 16000, kernel_size: int = 3, channel_ratios: list[float] = [1, 1], strides: list[int] = [1, 1], block_dilations: list[int] = [1, 1], unit_kernel_size: int = 3, codebook_size: int = 1024, codebook_dim: Optional[int] = None, initializer_range: float = 0.02, acoustic_model_config: Union[dict, DacConfig] = None, semantic_model_config: Union[dict, HubertConfig] = None, **kwargs, ): super().__init__(**kwargs) if acoustic_model_config is None: self.acoustic_model_config = DacConfig( encoder_hidden_size=64, # NOTE: original DAC uses [2, 4, 8, 8] `downsampling ratios`, namely reverse of `upsampling_ratios` # (not sure if intentional by Xcodec but we keep it) downsampling_ratios=[8, 5, 4, 2], decoder_hidden_size=1024, upsampling_ratios=[8, 5, 4, 2], hidden_size=256, ) elif isinstance(acoustic_model_config, dict): self.acoustic_model_config = DacConfig(**acoustic_model_config) elif isinstance(acoustic_model_config, DacConfig): self.acoustic_model_config = acoustic_model_config else: raise ValueError( f"acoustic_model_config must be a dict or DacConfig instance, but got {type(acoustic_model_config)}" ) if semantic_model_config is None: self.semantic_model_config = HubertConfig() elif isinstance(semantic_model_config, dict): if "_name_or_path" in semantic_model_config: # If the config is a path, load it using AutoConfig self.semantic_model_config = AutoConfig.from_pretrained(semantic_model_config["_name_or_path"]) else: # assume HubertConfig as probably created from scratch logger.warning( "Could not determine semantic model type from config architecture. Defaulting to `HubertConfig`." 
) self.semantic_model_config = HubertConfig(**semantic_model_config) elif isinstance(semantic_model_config, WavLMConfig) or isinstance(semantic_model_config, HubertConfig): self.semantic_model_config = semantic_model_config else: raise ValueError( f"semantic_model_config must be a dict, HubertConfig, or WavLMConfig instance, but got {type(semantic_model_config)}" ) if target_bandwidths is None: target_bandwidths = [0.5, 1, 1.5, 2, 4] self.target_bandwidths = target_bandwidths self.sample_rate = sample_rate self.kernel_size = kernel_size self.channel_ratios = channel_ratios self.strides = strides self.block_dilations = block_dilations self.unit_kernel_size = unit_kernel_size self.codebook_size = codebook_size self.initializer_range = initializer_range if codebook_dim is None: codebook_dim = self.acoustic_model_config.hidden_size + self.semantic_model_config.hidden_size self.codebook_dim = codebook_dim @property def frame_rate(self) -> int: return math.ceil(self.sample_rate / self.hop_length) @property def semantic_hidden_size(self) -> int: return self.semantic_model_config.hidden_size @property def hop_length(self) -> int: return int(np.prod(self.acoustic_model_config.downsampling_ratios)) @property def codebook_nbits(self) -> int: return math.ceil(math.log2(self.codebook_size)) @property def hidden_size(self) -> int: return self.acoustic_model_config.hidden_size + self.semantic_model_config.hidden_size @property def num_quantizers(self) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * self.codebook_nbits)) __all__ = ["XcodecConfig"]
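# --- Editorial sketch, not part of the original file: the derived properties
# above, worked out for the default configuration (all values as defined in
# this file, assuming the default DAC and target-bandwidth settings):
#
#     cfg = XcodecConfig()
#     cfg.hop_length      # prod([8, 5, 4, 2]) = 320 samples per frame
#     cfg.frame_rate      # ceil(16000 / 320) = 50 frames per second
#     cfg.codebook_nbits  # ceil(log2(1024)) = 10 bits per codebook index
#     cfg.num_quantizers  # 1000 * 4 // (50 * 10) = 8 residual quantizers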
# coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for XLM."""

import json
import os
import re
import sys
import unicodedata
from typing import Optional

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
    strings)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def lowercase_and_remove_accent(text):
    """
    Lowercase and strips accents from a piece of text based on
    https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py
    """
    text = " ".join(text)
    text = text.lower()
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
        cat = unicodedata.category(char)
        if cat == "Mn":
            continue
        output.append(char)
    return "".join(output).lower().split(" ")


def replace_unicode_punct(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
    """
    text = text.replace("，", ",")
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace("：", ":")
    text = text.replace("？", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace("）", ")")
    text = text.replace("！", "!")
    text = text.replace("（", "(")
    text = text.replace("；", ";")
    text = text.replace("１", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("０", "0")
    text = text.replace("３", "3")
    text = text.replace("２", "2")
    text = text.replace("５", "5")
    text = text.replace("６", "6")
    text = text.replace("９", "9")
    text = text.replace("７", "7")
    text = text.replace("８", "8")
    text = text.replace("４", "4")
    text = re.sub(r"。\s*", ". ", text)
", text) text = text.replace("~", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("%", "%") return text def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output) def romanian_preprocessing(text): """Sennrich's WMT16 scripts for Romanian preprocessing, used by model `FacebookAI/xlm-mlm-enro-1024`""" # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py text = text.replace("\u015e", "\u0218").replace("\u015f", "\u0219") text = text.replace("\u0162", "\u021a").replace("\u0163", "\u021b") # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py text = text.replace("\u0218", "S").replace("\u0219", "s") # s-comma text = text.replace("\u021a", "T").replace("\u021b", "t") # t-comma text = text.replace("\u0102", "A").replace("\u0103", "a") text = text.replace("\u00c2", "A").replace("\u00e2", "a") text = text.replace("\u00ce", "I").replace("\u00ee", "i") return text class XLMTokenizer(PreTrainedTokenizer): """ Construct an XLM tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization for most supported languages. - Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP). - Optionally lowercases and normalizes all inputs text. - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like "__classify__") to a vocabulary. - The `lang2id` attribute maps the languages supported by the model with their IDs if provided (automatically set for pretrained vocabularies). - The `id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Vocabulary file. merges_file (`str`): Merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"</s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"<special1>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`): List of additional special tokens. lang2id (`Dict[str, int]`, *optional*): Dictionary mapping languages string identifiers to their IDs. id2lang (`Dict[int, str]`, *optional*): Dictionary mapping language IDs to their string identifiers. do_lowercase_and_remove_accent (`bool`, *optional*, defaults to `True`): Whether to lowercase and remove accents when tokenizing. """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", cls_token="</s>", mask_token="<special1>", additional_special_tokens=[ "<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>", ], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs, ): try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = {} # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = {} self.lang_with_custom_tokenizer = {"zh", "th", "ja"} # True for current supported model (v1.2.0), False for XLM-17 & 100 self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent self.lang2id = lang2id self.id2lang = id2lang if lang2id is not None and id2lang is not None: assert len(lang2id) == len(id2lang) self.ja_word_tokenizer = None self.zh_word_tokenizer = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, **kwargs, ) @property def do_lower_case(self): return self.do_lowercase_and_remove_accent def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer else: punct_normalizer = self.cache_moses_punct_normalizer[lang] return punct_normalizer.normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer else: moses_tokenizer = self.cache_moses_tokenizer[lang] return moses_tokenizer.tokenize(text, return_str=False, escape=False) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = 
self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea

                self.ja_word_tokenizer = Mykytea.Mykytea(
                    f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
                )
            except (AttributeError, ImportError):
                logger.error(
                    "Make sure you install KyTea (https://github.com/neubig/kytea) and its Python wrapper"
                    " (https://github.com/chezou/Mykytea-python) with the following steps"
                )
                logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
                logger.error("2. autoreconf -i")
                logger.error("3. ./configure --prefix=$HOME/local")
                logger.error("4. make && make install")
                logger.error("5. pip install kytea")
                raise
        return list(self.ja_word_tokenizer.getWS(text))

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            word = "\n</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang="en", bypass_tokenizer=False):
        """
        Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizer.
        Otherwise, we use Moses.

        Details of tokenization:

            - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
            - Install with `pip install sacremoses`
            - [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer
            - Install with `pip install pythainlp`
            - [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of
              [KyTea](https://github.com/neubig/kytea)
            - Install with the following steps:

            ::

                git clone git@github.com:neubig/kytea.git && cd kytea
                autoreconf -i
                ./configure --prefix=$HOME/local
                make && make install
                pip install kytea

            - [jieba](https://github.com/fxsjy/jieba): Chinese tokenizer (*)
            - Install with `pip install jieba`

        (*) The original XLM used [Stanford
        Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip). However, the wrapper
        (`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated. Jieba is a lot
        faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine if you
        fine-tune the model with Chinese supervision. If you want the same exact behaviour, use the original XLM
        [preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence
        externally, and set `bypass_tokenizer=True` to bypass the tokenizer.

        Args:
            - lang: ISO language code (default = 'en') (string). Languages should belong to the model's supported
              languages. However, we don't enforce it.
            - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
              (bool). If True, we only apply BPE.

        Returns:
            List of tokens.
""" if lang and self.lang2id and lang not in self.lang2id: logger.error( "Supplied language code not found in lang2id mapping. Please check that your language is supported by" " the loaded pretrained model." ) if bypass_tokenizer: text = text.split() elif lang not in self.lang_with_custom_tokenizer: text = self.moses_pipeline(text, lang=lang) # TODO: make sure we are using `FacebookAI/xlm-mlm-enro-1024`, since XLM-100 doesn't have this step if lang == "ro": text = romanian_preprocessing(text) text = self.moses_tokenize(text, lang=lang) elif lang == "th": text = self.moses_pipeline(text, lang=lang) try: if "pythainlp" not in sys.modules: from pythainlp.tokenize import word_tokenize as th_word_tokenize else: th_word_tokenize = sys.modules["pythainlp"].word_tokenize except (AttributeError, ImportError): logger.error( "Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps" ) logger.error("1. pip install pythainlp") raise text = th_word_tokenize(text) elif lang == "zh": try: if "jieba" not in sys.modules: import jieba else: jieba = sys.modules["jieba"] except (AttributeError, ImportError): logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps") logger.error("1. pip install jieba") raise text = " ".join(jieba.cut(text)) text = self.moses_pipeline(text, lang=lang) text = text.split() elif lang == "ja": text = self.moses_pipeline(text, lang=lang) text = self.ja_tokenize(text) else: raise ValueError("It should not reach here") if self.do_lowercase_and_remove_accent and not bypass_tokenizer: text = lowercase_and_remove_accent(text) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = "".join(tokens).replace("</w>", " ").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ bos = [self.bos_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return bos + token_ids_0 + sep return bos + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses __all__ = ["XLMTokenizer"]
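# --- Editorial sketch, not part of the original file: the special-token layout
# produced by `build_inputs_with_special_tokens` and `get_special_tokens_mask`
# above, on hypothetical token id lists (with `tok` an XLMTokenizer instance):
#
#     tok.build_inputs_with_special_tokens([5, 6])          # -> [bos, 5, 6, sep]
#     tok.build_inputs_with_special_tokens([5, 6], [7, 8])  # -> [bos, 5, 6, sep, 7, 8, sep]
#     tok.get_special_tokens_mask([5, 6], [7, 8])           # -> [1, 0, 0, 1, 0, 0, 1]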
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLNet model. """ import warnings from dataclasses import dataclass from typing import Callable, Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, get_activation from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ModelOutput, auto_docstring, logging from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, "transformer"): if hasattr(model, "lm_loss"): # We will load also the output bias tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights: # We will load also the sequence summary tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias if ( hasattr(model, "logits_proj") and config.finetuning_task is not None and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights ): tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias # Now load the rest of the transformer model = model.transformer # Embeddings and output tf_to_pt_map.update( { "model/transformer/word_embedding/lookup_table": model.word_embedding.weight, "model/transformer/mask_emb/mask_emb": model.mask_emb, } ) # Transformer blocks for i, b in enumerate(model.layer): layer_str = f"model/transformer/layer_{i}/" tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.rel_attn.o, layer_str + "rel_attn/q/kernel": b.rel_attn.q, layer_str + "rel_attn/k/kernel": b.rel_attn.k, layer_str + "rel_attn/r/kernel": b.rel_attn.r, layer_str + "rel_attn/v/kernel": b.rel_attn.v, layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight, layer_str + "ff/layer_1/bias": b.ff.layer_1.bias, layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight, layer_str + "ff/layer_2/bias": b.ff.layer_2.bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: 
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update(
        {
            "model/transformer/r_r_bias": r_r_list,
            "model/transformer/r_w_bias": r_w_list,
            "model/transformer/r_s_bias": r_s_list,
            "model/transformer/seg_embed": seg_embed_list,
        }
    )
    return tf_to_pt_map


def load_tf_weights_in_xlnet(model, config, tf_path):
    """Load tf checkpoints in a pytorch model"""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0], (
                f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched"
            )
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape, (
                        f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched"
                    )
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info(f"Initialize PyTorch weight {name} for layer {i}")
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape, (
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
                )
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info(f"Initialize PyTorch weight {name}")
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


class XLNetRelativeAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        if config.d_model % config.n_head != 0:
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_head})"
            )

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head**0.5)

        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))

        self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))

        return x

    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        x_size = x.shape

        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        #       However, tracing doesn't like the nature of the slice, and if klen changes
        #       during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long)) # x = x[:, :, :, :klen] return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None, output_attentions=False, ): """Core relative positional attention operations.""" # content based attention score ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h) # position based attention score bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift_bnij(bd, klen=ac.shape[3]) # segment based attention score if seg_mat is None: ef = 0 else: ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == torch.float16: attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask) else: attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask) # attention probability attn_prob = nn.functional.softmax(attn_score, dim=3) attn_prob = self.dropout(attn_prob) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask) # attention output attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, torch.einsum("bnij->ijbn", attn_prob) return attn_vec def post_attention(self, h, attn_vec, residual=True): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def forward( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
# content based attention score if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content-based key head k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h) # g-stream # query-stream query head q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content heads q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads # type casting for fp16 support k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class XLNetFeedForward(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.layer_1 = nn.Linear(config.d_model, config.d_inner) self.layer_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = ACT2FN[config.ff_activation] else: self.activation_function = config.ff_activation def forward(self, inp): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output) output = self.layer_2(output) output = self.dropout(output) output = self.layer_norm(output + inp) return output class XLNetLayer(nn.Module): def __init__(self, config): super().__init__() self.rel_attn = XLNetRelativeAttention(config) self.ff = XLNetFeedForward(config) self.dropout = nn.Dropout(config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward( self, output_h, output_g, 
attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): outputs = self.rel_attn( output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=mems, target_mapping=target_mapping, head_mask=head_mask, output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g ) output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs def ff_chunk(self, output_x): output_x = self.ff(output_x) return output_x # Copied from transformers.models.xlm.modeling_xlm.XLMPoolerStartLogits with XLM->XLNet class XLNetPoolerStartLogits(nn.Module): """ Compute SQuAD start logits from sequence hidden states. Args: config ([`XLNetConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model. """ def __init__(self, config: XLNetConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. Returns: `torch.FloatTensor`: The start logits for SQuAD. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if p_mask.dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x # Copied from transformers.models.xlm.modeling_xlm.XLMPoolerEndLogits with XLM->XLNet class XLNetPoolerEndLogits(nn.Module): """ Compute SQuAD end logits from sequence hidden states. Args: config ([`XLNetConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` to use. """ def __init__(self, config: XLNetConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): The hidden states of the first tokens for the labeled span. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the first token for the labeled span. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. <Tip> One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides `start_states`. </Tip> Returns: `torch.FloatTensor`: The end logits for SQuAD. 
""" assert start_states is not None or start_positions is not None, ( "One of start_states, start_positions should be not None" ) if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if p_mask.dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x # Copied from transformers.models.xlm.modeling_xlm.XLMPoolerAnswerClass with XLM->XLNet class XLNetPoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. Args: config ([`XLNetConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model. """ def __init__(self, config: XLNetConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): The hidden states of the first tokens for the labeled span. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the first token for the labeled span. cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Position of the CLS token for each sentence in the batch. If `None`, takes the last token. <Tip> One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides `start_states`. </Tip> Returns: `torch.FloatTensor`: The SQuAD 2.0 answer class. """ # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. hsz = hidden_states.shape[-1] assert start_states is not None or start_positions is not None, ( "One of start_states, start_positions should be not None" ) if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x # Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->XLNet class XLNetSequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`XLNetConfig`]): The config used by the model. 
Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: XLNetConfig): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = nn.Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() self.first_dropout = nn.Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = nn.Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output @auto_docstring class XLNetPreTrainedModel(PreTrainedModel): config: XLNetConfig load_tf_weights = load_tf_weights_in_xlnet base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, XLNetRelativeAttention): for param in [ module.q, module.k, module.v, module.o, module.r, module.r_r_bias, module.r_s_bias, module.r_w_bias, module.seg_embed, ]: param.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XLNetModel): module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) @dataclass @auto_docstring( custom_intro=""" Output type of [`XLNetModel`]. """ ) class XLNetModelOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. """ last_hidden_state: torch.FloatTensor mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Output type of [`XLNetLMHeadModel`]. """ ) class XLNetLMHeadModelOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. 
If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
    mems (`list[torch.FloatTensor]` of length `config.n_layers`):
        Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
        token ids which have their past given to this model should not be passed as `input_ids` as they have
        already been computed.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    mems: Optional[list[torch.FloatTensor]] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`XLNetForSequenceClassification`].
    """
)
class XLNetForSequenceClassificationOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification (or regression if config.num_labels==1) loss.
    logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
        Classification (or regression if config.num_labels==1) scores (before SoftMax).
    mems (`list[torch.FloatTensor]` of length `config.n_layers`):
        Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
        token ids which have their past given to this model should not be passed as `input_ids` as they have
        already been computed.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    mems: Optional[list[torch.FloatTensor]] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`XLNetForTokenClassification`].
    """
)
class XLNetForTokenClassificationOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
        Classification scores (before SoftMax).
    mems (`list[torch.FloatTensor]` of length `config.n_layers`):
        Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
        token ids which have their past given to this model should not be passed as `input_ids` as they have
        already been computed.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    mems: Optional[list[torch.FloatTensor]] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`XLNetForMultipleChoice`].
    """
)
class XLNetForMultipleChoiceOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
        *num_choices* is the second dimension of the input tensors. (see *input_ids* above).

        Classification scores (before SoftMax).
    mems (`list[torch.FloatTensor]` of length `config.n_layers`):
        Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
        token ids which have their past given to this model should not be passed as `input_ids` as they have
        already been computed.
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Output type of [`XLNetForQuestionAnsweringSimple`]. """ ) class XLNetForQuestionAnsweringSimpleOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. """ loss: Optional[torch.FloatTensor] = None start_logits: Optional[torch.FloatTensor] = None end_logits: Optional[torch.FloatTensor] = None mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Output type of [`XLNetForQuestionAnswering`]. """ ) class XLNetForQuestionAnsweringOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. 
""" loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring class XLNetModel(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.vocab_size, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, new_embeddings): self.word_embedding = new_embeddings def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: Sequence length mlen: Mask length :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ mask = torch.ones((qlen, qlen + mlen), device=self.device) if self.same_length: mask_lo = mask[:, :qlen].tril(-1) mask.triu_(mlen + 1) mask[:, :qlen] += mask_lo else: mask.triu_(mlen + 1) return mask def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: # if `use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): # create relative positional encoding. 
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float() inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError(f"Unknown `attn_type` {self.attn_type}.") if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete after depreciation warning is removed ) -> Union[tuple, XLNetModelOutput]: r""" mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. 
You can only uses one of `input_mask` and `attention_mask`. use_mems (`bool`, *optional*): Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden states from previous forward passes to compute attention, which can significantly improve performance for sequential decoding tasks. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`" " instead.", FutureWarning, ) use_mems = kwargs["use_cache"] if self.training: use_mems = use_mems if use_mems is not None else self.config.use_mems_train else: use_mems = use_mems if use_mems is not None else self.config.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.shape[0], input_ids.shape[1] elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = self.dtype device = self.device # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError(f"Unsupported attention type: {self.attn_type}") # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." 
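        # The masks below are combined in "1.0 = cannot attend" convention: `input_mask` (padding) is
        # broadcast against `perm_mask` (factorization order) into a `data_mask` of shape (qlen, qlen, bsz),
        # zeros are prepended along dim 1 so that all `mlen` memory positions stay visible, and the result
        # is folded into `attn_mask` of shape (qlen, klen, bsz, 1).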
if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = pos_emb.to(output_h.device) pos_emb = self.dropout(pos_emb) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to float if need + fp16 compatibility else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): if use_mems: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i], output_attentions=output_attentions, ) output_h, 
output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = output.permute(1, 0, 2).contiguous() if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs) else: hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions ) else: attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return XLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) @auto_docstring( custom_intro=""" XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """ ) class XLNetLMHeadModel(XLNetPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_loss.weight"] def __init__(self, config): super().__init__(config) self.attn_type = config.attn_type self.same_length = config.same_length self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_loss def set_output_embeddings(self, new_embeddings): self.lm_loss = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs): # Overwritten -- this model has unique input preparation # Add dummy token at the end (no attention on this one) effective_batch_size = input_ids.shape[0] dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. 
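        # Concretely: with cached mems and offset = 2, only the last two real tokens plus the dummy token are
        # fed on each step; the `perm_mask` built below hides the dummy token from every position, and
        # `target_mapping` selects it as the single prediction slot.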
offset = 2 if past_key_values: input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1) else: input_ids = torch.cat([input_ids, dummy_token], dim=1) # Build permutation mask so that previous tokens don't see last token sequence_length = input_ids.shape[1] perm_mask = torch.zeros( (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device ) perm_mask[:, :, -1] = 1.0 # We'll only predict the last token target_mapping = torch.zeros( (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device ) target_mapping[:, 0, -1] = 1.0 inputs = { "input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_mems": use_mems, } # if past is defined in model kwargs then use it for faster decoding if past_key_values: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values) return inputs @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[tuple, XLNetLMHeadModelOutput]: r""" mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*): Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. 
The labels should correspond to the masked input words that should be predicted and depend on
            `target_mapping`. Note that in order to perform standard auto-regressive language modeling, a *<mask>*
            token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples
            below).

            Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored; the
            loss is only computed for labels in `[0, ..., config.vocab_size]`.
        use_mems (`bool`, *optional*):
            Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the
            hidden states from previous forward passes to compute attention, which can significantly improve
            performance for sequential decoding tasks.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, XLNetLMHeadModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
        >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")

        >>> # We show how to set up inputs to predict a next token using a bi-directional context.
        >>> input_ids = torch.tensor(
        ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
        ... ).unsqueeze(
        ...     0
        ... )  # We will predict the masked token
        >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        >>> perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        >>> target_mapping = torch.zeros(
        ...     (1, 1, input_ids.shape[1]), dtype=torch.float
        ... )  # Shape [1, 1, seq_length] => let's predict one token
        >>> target_mapping[
        ...     0, 0, -1
        ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

        >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        >>> next_token_logits = outputs[
        ...     0
        ... ]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]

        >>> # The XLNetLMHeadModel can be trained in the same way with standard auto-regressive language modeling.
        >>> input_ids = torch.tensor(
        ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
        ... ).unsqueeze(
        ...     0
        ... )  # We will predict the masked token
        >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
        >>> assert labels.shape[0] == 1, "only one word will be predicted"
        >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        >>> perm_mask[
        ...     :, :, -1
        ... ] = 1.0  # Previous tokens don't see last token as is done in standard auto-regressive lm training
        >>> target_mapping = torch.zeros(
        ...     (1, 1, input_ids.shape[1]), dtype=torch.float
        ... )  # Shape [1, 1, seq_length] => let's predict one token
        >>> target_mapping[
        ...     0, 0, -1
        ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

        >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
        >>> loss = outputs.loss
        >>> next_token_logits = (
        ...     outputs.logits
        ...
) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) logits = self.lm_loss(transformer_outputs[0]) loss = None if labels is not None: # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache(mems: list[torch.Tensor], beam_idx: torch.Tensor) -> list[torch.Tensor]: """ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @auto_docstring( custom_intro=""" XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class XLNetForSequenceClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = XLNetModel(config) self.sequence_summary = XLNetSequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[tuple, XLNetForSequenceClassificationOutput]: r""" mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. 
If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). use_mems (`bool`, *optional*): Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden states from previous forward passes to compute attention, which can significantly improve performance for sequential decoding tasks. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class XLNetForTokenClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.classifier = nn.Linear(config.hidden_size, 
config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        mems: Optional[torch.Tensor] = None,
        perm_mask: Optional[torch.Tensor] = None,
        target_mapping: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        input_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[tuple, XLNetForTokenClassificationOutput]:
        r"""
        mems (`list[torch.FloatTensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (see `mems` output below). Can be used to speed up sequential
            decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
            they have already been computed.

            `use_mems` has to be set to `True` to make use of `mems`.
        perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
            Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:

            - if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
            - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.

            If not set, each token attends to all the others (full bidirectional attention). Only used during
            pretraining (to define factorization order) or for sequential decoding (generation).
        target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
            Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k
            is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
            (generation).
        input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*):
            Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0
            for real tokens and 1 for padding which is kept for compatibility with the original code base.

            Mask values selected in `[0, 1]`:

            - 1 for tokens that are **masked**,
            - 0 for tokens that are **not masked**.

            You can only use one of `input_mask` and `attention_mask`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        use_mems (`bool`, *optional*):
            Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the
            hidden states from previous forward passes to compute attention, which can significantly improve
            performance for sequential decoding tasks.
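        Example (a minimal sketch; the checkpoint name is reused from the other examples in this file, and the
        token-classification head on top of the base checkpoint is randomly initialized, so the scores are
        illustrative only):

        ```python
        >>> from transformers import AutoTokenizer, XLNetForTokenClassification
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
        >>> model = XLNetForTokenClassification.from_pretrained("xlnet/xlnet-base-cased")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits  # one score per token and per label
        >>> logits.shape[-1] == model.config.num_labels
        True
        ```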
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForTokenClassificationOutput( loss=loss, logits=logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLNetForMultipleChoice(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = XLNetModel(config) self.sequence_summary = XLNetSequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[tuple, XLNetForMultipleChoiceOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) input_mask (`torch.FloatTensor` of shape `batch_size, num_choices, sequence_length`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. 
perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
            Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:

            - if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
            - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.

            If not set, each token attends to all the others (full bidirectional attention). Only used during
            pretraining (to define factorization order) or for sequential decoding (generation).
        target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
            Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k
            is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
            (generation).
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in
            `[0, ..., num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors.
            (See *input_ids* above.)
        use_mems (`bool`, *optional*):
            Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the
            hidden states from previous forward passes to compute attention, which can significantly improve
            performance for sequential decoding tasks.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        transformer_outputs = self.transformer(
            flat_input_ids,
            token_type_ids=flat_token_type_ids,
            input_mask=flat_input_mask,
            attention_mask=flat_attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )

        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels.view(-1))

        if not return_dict:
            output = (reshaped_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetForMultipleChoiceOutput(
            loss=loss,
            logits=reshaped_logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to
compute `span start logits` and `span end logits`). """ ) class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[tuple, XLNetForQuestionAnsweringSimpleOutput]: r""" mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. use_mems (`bool`, *optional*): Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden states from previous forward passes to compute attention, which can significantly improve performance for sequential decoding tasks. 
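        Example (a minimal sketch; the checkpoint name is reused from the other examples in this file, and the
        span head on top of the base checkpoint is randomly initialized, so the predicted span is illustrative
        only):

        ```python
        >>> from transformers import AutoTokenizer, XLNetForQuestionAnsweringSimple
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
        >>> model = XLNetForQuestionAnsweringSimple.from_pretrained("xlnet/xlnet-base-cased")

        >>> question, context = "Where does my dog live?", "Hello, my dog is cute and lives in Paris."
        >>> inputs = tokenizer(question, context, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> start_index = outputs.start_logits.argmax(-1)  # most likely span start, per batch item
        >>> end_index = outputs.end_logits.argmax(-1)  # most likely span end, per batch item
        ```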
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return XLNetForQuestionAnsweringSimpleOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLNetForQuestionAnswering(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.transformer = XLNetModel(config) self.start_logits = XLNetPoolerStartLogits(config) self.end_logits = XLNetPoolerEndLogits(config) self.answer_class = XLNetPoolerAnswerClass(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, is_impossible: Optional[torch.Tensor] = None, cls_index: Optional[torch.Tensor] = None, p_mask: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete when `use_cache` is removed in XLNetModel ) -> Union[tuple, XLNetForQuestionAnsweringOutput]: r""" mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. 
perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
            Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:

            - if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
            - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.

            If not set, each token attends to all the others (full bidirectional attention). Only used during
            pretraining (to define factorization order) or for sequential decoding (generation).
        target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
            Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k
            is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
            (generation).
        input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*):
            Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0
            for real tokens and 1 for padding which is kept for compatibility with the original code base.

            Mask values selected in `[0, 1]`:

            - 1 for tokens that are **masked**,
            - 0 for tokens that are **not masked**.

            You can only use one of `input_mask` and `attention_mask`.
        is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for whether a question has an answer or no answer (SQuAD 2.0).
        cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the classification token to use as input for computing plausibility of
            the answer.
        p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means the token should
            be masked, 0.0 means the token is not masked.
        use_mems (`bool`, *optional*):
            Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the
            hidden states from previous forward passes to compute attention, which can significantly improve
            performance for sequential decoding tasks.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, XLNetForQuestionAnswering
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
        >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")

        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
        ...     0
        ...
) # Batch size 1 >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] start_logits = self.start_logits(hidden_states, p_mask=p_mask) outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 if not return_dict: return (total_loss,) + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( loss=total_loss, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum( "blh,bl->bh", hidden_states, start_log_probs ) # get the 
representation of START as weighted sum of hidden states cls_logits = self.answer_class( hidden_states, start_states=start_states, cls_index=cls_index ) # Shape (batch size,): one single `cls_logits` for each sample if not return_dict: outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) return outputs + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ]
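The inference branch above returns top-k start and end candidates rather than raw logits; the sketch below shows one way a caller might decode the best answer span from those outputs. It is a minimal, illustrative helper (not part of the library), assuming batch size 1, the default `start_n_top`/`end_n_top` of 5, and the `(end_n_top, start_n_top)` flattening used in the code above.

```python
# Hypothetical helper (not part of transformers): decode the best answer span
# from the beam-search outputs of XLNetForQuestionAnswering at inference time.
def decode_best_span(outputs, start_n_top=5, end_n_top=5, max_answer_length=30):
    start_scores = outputs.start_top_log_probs  # shape (1, start_n_top)
    start_index = outputs.start_top_index  # shape (1, start_n_top)
    end_scores = outputs.end_top_log_probs  # shape (1, start_n_top * end_n_top)
    end_index = outputs.end_top_index  # shape (1, start_n_top * end_n_top)
    best = None
    for i in range(start_n_top):
        for j in range(end_n_top):
            # end candidates are laid out as (end_n_top, start_n_top), flattened row-major
            flat = j * start_n_top + i
            start, end = start_index[0, i].item(), end_index[0, flat].item()
            if end < start or end - start + 1 > max_answer_length:
                continue
            score = start_scores[0, i].item() + end_scores[0, flat].item()
            if best is None or score > best[0]:
                best = (score, start, end)
    return best  # (score, start_token_index, end_token_index), or None
```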
transformers/src/transformers/models/xlnet/modeling_xlnet.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/modeling_xlnet.py", "repo_id": "transformers", "token_count": 47559 }
545
# coding=utf-8 # Copyright 2022 School of EIC, Huazhong University of Science & Technology and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch YOLOS model.""" import collections.abc from dataclasses import dataclass from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from .configuration_yolos import YolosConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Output type of [`YolosForObjectDetection`]. """ ) class YolosObjectDetectionOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~YolosImageProcessor.post_process`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None auxiliary_outputs: Optional[list[dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None class YolosEmbeddings(nn.Module): """ Construct the CLS token, detection tokens, position and patch embeddings.
""" def __init__(self, config: YolosConfig) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.detection_tokens = nn.Parameter(torch.zeros(1, config.num_detection_tokens, config.hidden_size)) self.patch_embeddings = YolosPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter( torch.zeros(1, num_patches + config.num_detection_tokens + 1, config.hidden_size) ) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.interpolation = InterpolateInitialPositionEmbeddings(config) self.config = config def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, _ = embeddings.size() # add the [CLS] and detection tokens to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) detection_tokens = self.detection_tokens.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings, detection_tokens), dim=1) # add positional encoding to each token # this might require interpolation of the existing position embeddings position_embeddings = self.interpolation(self.position_embeddings, (height, width)) embeddings = embeddings + position_embeddings embeddings = self.dropout(embeddings) return embeddings class InterpolateInitialPositionEmbeddings(nn.Module): def __init__(self, config) -> None: super().__init__() self.config = config def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor: cls_pos_embed = pos_embed[:, 0, :] cls_pos_embed = cls_pos_embed[:, None] det_pos_embed = pos_embed[:, -self.config.num_detection_tokens :, :] patch_pos_embed = pos_embed[:, 1 : -self.config.num_detection_tokens, :] patch_pos_embed = patch_pos_embed.transpose(1, 2) batch_size, hidden_size, seq_len = patch_pos_embed.shape patch_height, patch_width = ( self.config.image_size[0] // self.config.patch_size, self.config.image_size[1] // self.config.patch_size, ) patch_pos_embed = patch_pos_embed.view(batch_size, hidden_size, patch_height, patch_width) height, width = img_size new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False ) patch_pos_embed = patch_pos_embed.flatten(2).transpose(1, 2) scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=1) return scale_pos_embed class InterpolateMidPositionEmbeddings(nn.Module): def __init__(self, config) -> None: super().__init__() self.config = config def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor: cls_pos_embed = pos_embed[:, :, 0, :] cls_pos_embed = cls_pos_embed[:, None] det_pos_embed = pos_embed[:, :, -self.config.num_detection_tokens :, :] patch_pos_embed = pos_embed[:, :, 1 : -self.config.num_detection_tokens, :] patch_pos_embed = patch_pos_embed.transpose(2, 3) depth, batch_size, hidden_size, seq_len = patch_pos_embed.shape patch_height, patch_width = ( self.config.image_size[0] // self.config.patch_size, self.config.image_size[1] // self.config.patch_size, ) patch_pos_embed = patch_pos_embed.view(depth * batch_size, hidden_size, patch_height, patch_width) height, width = img_size new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size patch_pos_embed = nn.functional.interpolate( patch_pos_embed, 
size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False ) patch_pos_embed = ( patch_pos_embed.flatten(2) .transpose(1, 2) .contiguous() .view(depth, batch_size, new_patch_height * new_patch_width, hidden_size) ) scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=2) return scale_pos_embed class YolosPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values matches the one set in the configuration." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling # Normalize the attention scores to probabilities. attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) # Mask heads if we want to if attention_mask is not None: attn_weights = attn_weights * attention_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Yolos class YolosSelfAttention(nn.Module): def __init__(self, config: YolosConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}."
) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: batch_size, seq_length, _ = hidden_states.shape key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Yolos class YolosSelfOutput(nn.Module): """ The residual connection is defined in YolosLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: YolosConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Yolos class YolosAttention(nn.Module): def __init__(self, config: YolosConfig) -> None: super().__init__() self.attention = YolosSelfAttention(config) self.output = YolosSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->Yolos class YolosIntermediate(nn.Module): def __init__(self, config: YolosConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->Yolos class YolosOutput(nn.Module): def __init__(self, config: YolosConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->Yolos,VIT->YOLOS class YolosLayer(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: YolosConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = YolosAttention(config) self.intermediate = YolosIntermediate(config) self.output = YolosOutput(config) self.layernorm_before = 
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in Yolos, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in Yolos, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class YolosEncoder(nn.Module): def __init__(self, config: YolosConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([YolosLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False seq_length = ( 1 + (config.image_size[0] * config.image_size[1] // config.patch_size**2) + config.num_detection_tokens ) self.mid_position_embeddings = ( nn.Parameter( torch.zeros( config.num_hidden_layers - 1, 1, seq_length, config.hidden_size, ) ) if config.use_mid_position_embeddings else None ) self.interpolation = InterpolateMidPositionEmbeddings(config) if config.use_mid_position_embeddings else None def forward( self, hidden_states: torch.Tensor, height, width, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if self.config.use_mid_position_embeddings: interpolated_mid_position_embeddings = self.interpolation(self.mid_position_embeddings, (height, width)) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if self.config.use_mid_position_embeddings: if i < (self.config.num_hidden_layers - 1): hidden_states = hidden_states + interpolated_mid_position_embeddings[i] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class YolosPreTrainedModel(PreTrainedModel): config: YolosConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly 
different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class YolosModel(YolosPreTrainedModel): def __init__(self, config: YolosConfig, add_pooling_layer: bool = True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = YolosEmbeddings(config) self.encoder = YolosEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = YolosPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> YolosPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None: """ Prunes heads of the model. Args: heads_to_prune (`dict`): See base class `PreTrainedModel`. The input dictionary must have the following format: {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, height=pixel_values.shape[-2], width=pixel_values.shape[-1], head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class YolosPooler(nn.Module): def __init__(self, config: YolosConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->Yolos class YolosMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x @auto_docstring( custom_intro=""" YOLOS Model (consisting of a ViT encoder) with object detection heads on top, for tasks such as COCO detection. """ ) class YolosForObjectDetection(YolosPreTrainedModel): def __init__(self, config: YolosConfig): super().__init__(config) # YOLOS (ViT) encoder model self.vit = YolosModel(config, add_pooling_layer=False) # Object detection heads # We add one for the "no object" class self.class_labels_classifier = YolosMLPPredictionHead( input_dim=config.hidden_size, hidden_dim=config.hidden_size, output_dim=config.num_labels + 1, num_layers=3 ) self.bbox_predictor = YolosMLPPredictionHead( input_dim=config.hidden_size, hidden_dim=config.hidden_size, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @auto_docstring def forward( self, pixel_values: torch.FloatTensor, labels: Optional[list[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, YolosObjectDetectionOutput]: r""" labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: `'class_labels'` and `'boxes'` (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. 
Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny") >>> model = AutoModelForObjectDetection.from_pretrained("hustvl/yolos-tiny") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected remote with confidence 0.991 at location [46.48, 72.78, 178.98, 119.3] Detected remote with confidence 0.908 at location [336.48, 79.27, 368.23, 192.36] Detected cat with confidence 0.934 at location [337.18, 18.06, 638.14, 373.09] Detected cat with confidence 0.979 at location [10.93, 53.74, 313.41, 470.67] Detected remote with confidence 0.974 at location [41.63, 72.23, 178.09, 119.99] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, send images through YOLOS base model to obtain hidden states outputs = self.vit( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # Take the final hidden states of the detection tokens sequence_output = sequence_output[:, -self.config.num_detection_tokens :, :] # Class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) pred_boxes = self.bbox_predictor(sequence_output).sigmoid() loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: outputs_class, outputs_coord = None, None if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() loss, loss_dict, auxiliary_outputs = self.loss_function( logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord ) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output return YolosObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel"]
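Since the prediction heads above run only on the final detection-token states, the output shapes are fully determined by `num_detection_tokens` and `num_labels`. A minimal shape-check sketch with an invented tiny config (the values below are illustrative and do not correspond to any released checkpoint):

```python
import torch

from transformers import YolosConfig, YolosForObjectDetection

# Tiny, made-up config for a quick shape check (not a released checkpoint).
config = YolosConfig(
    image_size=[64, 96],
    patch_size=16,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
    num_detection_tokens=10,
    num_labels=3,
    use_mid_position_embeddings=False,
)
model = YolosForObjectDetection(config).eval()
pixel_values = torch.randn(1, 3, 64, 96)
with torch.no_grad():
    outputs = model(pixel_values)

# One prediction per detection token; the logits include the extra "no object" class.
assert outputs.logits.shape == (1, config.num_detection_tokens, config.num_labels + 1)
assert outputs.pred_boxes.shape == (1, config.num_detection_tokens, 4)
```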
transformers/src/transformers/models/yolos/modeling_yolos.py/0
{ "file_path": "transformers/src/transformers/models/yolos/modeling_yolos.py", "repo_id": "transformers", "token_count": 14276 }
546
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ZoeDepth.""" import math from collections.abc import Iterable from typing import TYPE_CHECKING, Optional, Union import numpy as np if TYPE_CHECKING: from .modeling_zoedepth import ZoeDepthDepthEstimatorOutput from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import PaddingMode, pad, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import ( TensorType, filter_out_non_signature_kwargs, is_torch_available, is_vision_available, logging, requires_backends, ) if is_vision_available(): import PIL if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) def get_resize_output_image_size( input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple[int, int]: def constrain_to_multiple_of(val, multiple, min_val=0): x = (np.round(val / multiple) * multiple).astype(int) if x < min_val: x = math.ceil(val / multiple) * multiple return x output_size = (output_size, output_size) if isinstance(output_size, int) else output_size input_height, input_width = get_image_size(input_image, input_data_format) output_height, output_width = output_size # determine new height and width scale_height = output_height / input_height scale_width = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width) < abs(1 - scale_height): # fit width scale_height = scale_width else: # fit height scale_width = scale_height new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple) new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple) return (new_height, new_width) class ZoeDepthImageProcessor(BaseImageProcessor): r""" Constructs a ZoeDepth image processor. Args: do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the input. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in `preprocess`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image.
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`. size (`dict[str, int]`, *optional*, defaults to `{"height": 384, "width": 512}`): Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. keep_aspect_ratio (`bool`, *optional*, defaults to `True`): If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value by flooring the height and width to the nearest multiple of this value. Can be overridden by `keep_aspect_ratio` in `preprocess`. ensure_multiple_of (`int`, *optional*, defaults to 32): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring the height and width to the nearest multiple of this value. Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of` in `preprocess`.
""" model_input_names = ["pixel_values"] def __init__( self, do_pad: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = True, ensure_multiple_of: int = 32, **kwargs, ) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD size = size if size is not None else {"height": 384, "width": 512} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.keep_aspect_ratio = keep_aspect_ratio self.ensure_multiple_of = ensure_multiple_of self.resample = resample def resize( self, image: np.ndarray, size: dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Target size of the output image. keep_aspect_ratio (`bool`, *optional*, defaults to `False`): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. ensure_multiple_of (`int`, *optional*, defaults to 1): The image is resized to a size that is a multiple of this value. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size specified in `size`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image) data_format = data_format if data_format is not None else input_data_format size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}") output_size = get_resize_output_image_size( image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=input_data_format, ) height, width = output_size torch_image = torch.from_numpy(image).unsqueeze(0) torch_image = torch_image.permute(0, 3, 1, 2) if input_data_format == "channels_last" else torch_image # TODO support align_corners=True in image_transforms.resize requires_backends(self, "torch") resample_to_mode = {PILImageResampling.BILINEAR: "bilinear", PILImageResampling.BICUBIC: "bicubic"} mode = resample_to_mode[resample] resized_image = nn.functional.interpolate( torch_image, (int(height), int(width)), mode=mode, align_corners=True ) resized_image = resized_image.squeeze().numpy() resized_image = to_channel_dimension_format( resized_image, data_format, input_channel_dim=ChannelDimension.FIRST ) return resized_image def pad_image( self, image: np.array, mode: PaddingMode = PaddingMode.REFLECT, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad an image as done in the original ZoeDepth implementation. Padding fixes the boundary artifacts in the output depth map. Boundary artifacts are sometimes caused by the fact that the model is trained on the NYU raw dataset, which has a black or white border around the image. This function pads the input image and crops the prediction back to the original size / view. Args: image (`np.ndarray`): Image to pad. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
""" height, width = get_image_size(image, input_data_format) pad_height = int(np.sqrt(height / 2) * 3) pad_width = int(np.sqrt(width / 2) * 3) return pad( image, padding=((pad_height, pad_height), (pad_width, pad_width)), mode=mode, data_format=data_format, input_data_format=input_data_format, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_pad: Optional[bool] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_resize: Optional[bool] = None, size: Optional[int] = None, keep_aspect_ratio: Optional[bool] = None, ensure_multiple_of: Optional[int] = None, resample: PILImageResampling = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the input image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. If `keep_aspect_ratio` is `True`, he image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value. keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`): If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value by flooring the height and width to the nearest multiple of this value. ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring the height and width to the nearest multiple of this value. Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of` in `preprocess`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. 
return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_pad: images = [self.pad_image(image=image, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize( image=image, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format, ) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) def post_process_depth_estimation( self, outputs: "ZoeDepthDepthEstimatorOutput", source_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None, target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None, outputs_flipped: Optional[Union["ZoeDepthDepthEstimatorOutput", None]] = None, do_remove_padding: Optional[Union[bool, None]] = None, ) -> list[dict[str, TensorType]]: """ Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images. Only supports PyTorch. Args: outputs ([`ZoeDepthDepthEstimatorOutput`]): Raw outputs of the model. source_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the source size (height, width) of each image in the batch before preprocessing. This argument should be treated as required unless the user passes `do_remove_padding=False` as input to this function. target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*): Raw outputs of the model from flipped input (averaged out in the end). do_remove_padding (`bool`, *optional*): By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the boundary artifacts in the output depth map, so we need to remove this padding during post-processing. The parameter exists here in case the user changed the image preprocessing to not include padding. Returns: `list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions.
""" requires_backends(self, "torch") predicted_depth = outputs.predicted_depth if (outputs_flipped is not None) and (predicted_depth.shape != outputs_flipped.predicted_depth.shape): raise ValueError("Make sure that `outputs` and `outputs_flipped` have the same shape") if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth" ) if do_remove_padding is None: do_remove_padding = self.do_pad if source_sizes is None and do_remove_padding: raise ValueError( "Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False" ) if (source_sizes is not None) and (len(predicted_depth) != len(source_sizes)): raise ValueError( "Make sure that you pass in as many source image sizes as the batch dimension of the logits" ) if outputs_flipped is not None: predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2 predicted_depth = predicted_depth.unsqueeze(1) # Zoe Depth model adds padding around the images to fix the boundary artifacts in the output depth map # The padding length is `int(np.sqrt(img_h/2) * fh)` for the height and similar for the width # fh (and fw respectively) are equal to '3' by default # Check [here](https://github.com/isl-org/ZoeDepth/blob/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/depth_model.py#L57) # for the original implementation. # In this section, we remove this padding to get the final depth image and depth prediction padding_factor_h = padding_factor_w = 3 results = [] target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes): # depth.shape = [1, H, W] if source_size is not None: pad_h = pad_w = 0 if do_remove_padding: pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h) pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w) depth = nn.functional.interpolate( depth.unsqueeze(1), size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w], mode="bicubic", align_corners=False, ) if pad_h > 0: depth = depth[:, :, pad_h:-pad_h, :] if pad_w > 0: depth = depth[:, :, :, pad_w:-pad_w] depth = depth.squeeze(1) # depth.shape = [1, H, W] if target_size is not None: target_size = [target_size[0], target_size[1]] depth = nn.functional.interpolate( depth.unsqueeze(1), size=target_size, mode="bicubic", align_corners=False ) depth = depth.squeeze() # depth.shape = [H, W] results.append({"predicted_depth": depth}) return results __all__ = ["ZoeDepthImageProcessor"]
transformers/src/transformers/models/zoedepth/image_processing_zoedepth.py/0
{ "file_path": "transformers/src/transformers/models/zoedepth/image_processing_zoedepth.py", "repo_id": "transformers", "token_count": 11833 }
547
from typing import Any, Union, overload from ..utils import ( add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class DepthEstimationPipeline(Pipeline): """ Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image. Example: ```python >>> from transformers import pipeline >>> depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf") >>> output = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg") >>> # This is a tensor with the values being the depth expressed in meters for each pixel >>> output["predicted_depth"].shape torch.Size([1, 384, 384]) ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This depth estimation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"depth-estimation"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=depth-estimation). """ _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES) @overload def __call__(self, inputs: Union[str, "Image.Image"], **kwargs: Any) -> dict[str, Any]: ... @overload def __call__(self, inputs: list[Union[str, "Image.Image"]], **kwargs: Any) -> list[dict[str, Any]]: ... def __call__( self, inputs: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs: Any ) -> Union[dict[str, Any], list[dict[str, Any]]]: """ Predict the depth(s) of the image(s) passed as inputs. Args: inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing an http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a list. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. parameters (`Dict`, *optional*): A dictionary of argument names to parameter values, to control pipeline behaviour. The only parameter available right now is `timeout`, which is the length of time, in seconds, that the pipeline should wait before giving up on trying to download an image. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing the results. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`. - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
""" # After deprecation of this is completed, remove the default `None` value for `images` if "images" in kwargs: inputs = kwargs.pop("images") if inputs is None: raise ValueError("Cannot call the depth-estimation pipeline without an inputs argument!") return super().__call__(inputs, **kwargs) def _sanitize_parameters(self, timeout=None, parameters=None, **kwargs): preprocess_params = {} if timeout is not None: preprocess_params["timeout"] = timeout if isinstance(parameters, dict) and "timeout" in parameters: preprocess_params["timeout"] = parameters["timeout"] return preprocess_params, {}, {} def preprocess(self, image, timeout=None): image = load_image(image, timeout) model_inputs = self.image_processor(images=image, return_tensors=self.framework) if self.framework == "pt": model_inputs = model_inputs.to(self.dtype) model_inputs["target_size"] = image.size[::-1] return model_inputs def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") model_outputs = self.model(**model_inputs) model_outputs["target_size"] = target_size return model_outputs def postprocess(self, model_outputs): outputs = self.image_processor.post_process_depth_estimation( model_outputs, # this acts as `source_sizes` for ZoeDepth and as `target_sizes` for the rest of the models so do *not* # replace with `target_sizes = [model_outputs["target_size"]]` [model_outputs["target_size"]], ) formatted_outputs = [] for output in outputs: depth = output["predicted_depth"].detach().cpu().numpy() depth = (depth - depth.min()) / (depth.max() - depth.min()) depth = Image.fromarray((depth * 255).astype("uint8")) formatted_outputs.append({"predicted_depth": output["predicted_depth"], "depth": depth}) return formatted_outputs[0] if len(outputs) == 1 else formatted_outputs
transformers/src/transformers/pipelines/depth_estimation.py/0
{ "file_path": "transformers/src/transformers/pipelines/depth_estimation.py", "repo_id": "transformers", "token_count": 2366 }
548
import inspect import warnings from typing import Any, Union import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import GenericTensor, Pipeline, build_pipeline_init_args if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES def sigmoid(_outputs): return 1.0 / (1.0 + np.exp(-_outputs)) def softmax(_outputs): maxes = np.max(_outputs, axis=-1, keepdims=True) shifted_exp = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class ClassificationFunction(ExplicitEnum): SIGMOID = "sigmoid" SOFTMAX = "softmax" NONE = "none" @add_end_docstrings( build_pipeline_init_args(has_tokenizer=True), r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. In case of regression tasks, will not apply any function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output.""", ) class TextClassificationPipeline(Pipeline): """ Text classification pipeline using any `ModelForSequenceClassification`. See the [sequence classification examples](../task_summary#sequence-classification) for more information. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english") >>> classifier("This movie is disgustingly good !") [{'label': 'POSITIVE', 'score': 1.0}] >>> classifier("Director tried too much.") [{'label': 'NEGATIVE', 'score': 0.996}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This text classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"sentiment-analysis"` (for classifying sequences according to positive or negative sentiments). If multiple classification labels are available (`model.config.num_labels >= 2`), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result. In case of regression tasks (`model.config.problem_type == "regression"`), will not apply any function on the output. The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-classification). 
""" _load_processor = False _load_image_processor = False _load_feature_extractor = False _load_tokenizer = True return_all_scores = False function_to_apply = ClassificationFunction.NONE def __init__(self, **kwargs): super().__init__(**kwargs) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs): # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" preprocess_params = tokenizer_kwargs postprocess_params = {} if hasattr(self.model.config, "return_all_scores") and return_all_scores is None: return_all_scores = self.model.config.return_all_scores if isinstance(top_k, int) or top_k is None: postprocess_params["top_k"] = top_k postprocess_params["_legacy"] = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, ) if return_all_scores: postprocess_params["top_k"] = None else: postprocess_params["top_k"] = 1 if isinstance(function_to_apply, str): function_to_apply = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: postprocess_params["function_to_apply"] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self, inputs: Union[str, list[str], dict[str, str], list[dict[str, str]]], **kwargs: Any, ) -> list[dict[str, Any]]: """ Classify the text(s) given as inputs. Args: inputs (`str` or `list[str]` or `dict[str]`, or `list[dict[str]]`): One or several texts to classify. In order to use text pairs for your classification, you can send a dictionary containing `{"text", "text_pair"}` keys, or a list of those. top_k (`int`, *optional*, defaults to `1`): How many results to return. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: If this argument is not specified, then it will apply the following functions according to the number of labels: - If problem type is regression, will not apply any function on the output. - If the model has a single label, will apply the sigmoid function on the output. - If the model has several labels, will apply the softmax function on the output. Possible values are: - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. Return: A list of `dict`: Each result comes as list of dictionaries with the following keys: - **label** (`str`) -- The label predicted. - **score** (`float`) -- The corresponding probability. If `top_k` is used, one such dictionary is returned per label. """ inputs = (inputs,) result = super().__call__(*inputs, **kwargs) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_legacy = "top_k" not in kwargs if isinstance(inputs[0], str) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def preprocess(self, inputs, **tokenizer_kwargs) -> dict[str, GenericTensor]: return_tensors = self.framework if isinstance(inputs, dict): return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs) elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs ) elif isinstance(inputs, list): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs) def _forward(self, model_inputs): # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported model_forward = self.model.forward if self.framework == "pt" else self.model.call if "use_cache" in inspect.signature(model_forward).parameters: model_inputs["use_cache"] = False return self.model(**model_inputs) def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True): # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "regression": function_to_apply = ClassificationFunction.NONE elif self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: function_to_apply = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: function_to_apply = ClassificationFunction.SOFTMAX elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: function_to_apply = self.model.config.function_to_apply else: function_to_apply = ClassificationFunction.NONE outputs = model_outputs["logits"][0] if self.framework == "pt": # To enable using fp16 and bf16 outputs = outputs.float().numpy() else: outputs = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: scores = sigmoid(outputs) elif function_to_apply == ClassificationFunction.SOFTMAX: scores = softmax(outputs) elif function_to_apply == ClassificationFunction.NONE: scores = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") if top_k == 1 and _legacy: return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()} dict_scores = [ {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) ] if not _legacy: dict_scores.sort(key=lambda x: x["score"], reverse=True) if top_k is not None: dict_scores = dict_scores[:top_k] return dict_scores
transformers/src/transformers/pipelines/text_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/text_classification.py", "repo_id": "transformers", "token_count": 4512 }
549
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from typing import TYPE_CHECKING from packaging import version from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..integrations import replace_with_aqlm_linear from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available, logging from ..utils.quantization_config import QuantizationConfigMixin if is_torch_available(): import torch logger = logging.get_logger(__name__) class AqlmHfQuantizer(HfQuantizer): """ Quantizer of the AQLM method. Enables the loading of prequantized models. """ requires_calibration = True required_packages = ["aqlm"] optimum_quantizer = None def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`") if not is_aqlm_available(): raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`") def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype": if dtype is None: if torch.cuda.is_available(): dtype = torch.float16 logger.info( "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `dtype` manually." ) else: dtype = torch.float32 logger.info( "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `dtype` manually." ) return dtype def _process_model_before_weight_loading( self, model: "PreTrainedModel", **kwargs, ): replace_with_aqlm_linear( model, quantization_config=self.quantization_config, linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize, ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model @property def is_trainable(self) -> bool: aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2") if aqlm_supports_training: return True else: logger.warning( f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`" ) return False def is_serializable(self, safe_serialization=None): return True
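
# A minimal sketch of loading a prequantized AQLM checkpoint, the only flow this quantizer
# supports (`requires_calibration = True`). The checkpoint name below is a placeholder for any
# AQLM-quantized hub model; the quantization config is read from the checkpoint itself, and the
# dtype defaults chosen by `update_dtype` above apply unless `dtype` is set explicitly.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "ISTA-DASLab/Mixtral-8x7b-AQLM-2Bit-1x16-hf",  # placeholder AQLM checkpoint
        device_map="auto",
    )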
transformers/src/transformers/quantizers/quantizer_aqlm.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_aqlm.py", "repo_id": "transformers", "token_count": 1390 }
550
# coding=utf-8 # Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Any from ..file_utils import is_torch_available from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel if is_torch_available(): import torch from ..utils import is_accelerate_available, is_quark_available, logging if is_accelerate_available(): from accelerate.utils import set_module_tensor_to_device logger = logging.get_logger(__name__) CHECKPOINT_KEYS = { "weight_scale": "weight_quantizer.scale", "bias_scale": "bias_quantizer.scale", "input_scale": "input_quantizer.scale", "output_scale": "output_quantizer.scale", "weight_zero_point": "weight_quantizer.zero_point", "bias_zero_point": "bias_quantizer.zero_point", "input_zero_point": "input_quantizer.zero_point", "output_zero_point": "output_quantizer.zero_point", } class QuarkHfQuantizer(HfQuantizer): """ Quark quantizer (https://quark.docs.amd.com/latest/). """ requires_calibration = True # On-the-fly quantization with quark is not supported for now. required_packages = ["quark"] # Checkpoints are expected to be already quantized when loading a quark model. However, as some keys from # the checkpoint might mismatch the model parameters keys, we use the `create_quantized_param` method # to load the checkpoints, remapping the keys. requires_parameters_quantization = True def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.json_export_config = quantization_config.json_export_config def validate_environment(self, *args, **kwargs): if not is_quark_available(): raise ImportError( "Loading a Quark quantized model requires the `quark` library but it was not found in the environment. Please refer to https://quark.docs.amd.com/latest/install.html." ) def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs): from quark.torch.export.api import _map_to_quark _map_to_quark( model, self.quantization_config.quant_config, pack_method=self.json_export_config.pack_method, custom_mode=self.quantization_config.custom_mode, ) return model def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: dict[str, Any], **kwargs, ) -> bool: return True def create_quantized_param( self, model, param, param_name, param_device, state_dict, unexpected_keys ) -> "torch.nn.Parameter": postfix = param_name.split(".")[-1] if postfix in CHECKPOINT_KEYS: param_name = param_name.replace(postfix, CHECKPOINT_KEYS[postfix]) set_module_tensor_to_device(model, param_name, param_device, value=param) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model def is_serializable(self, safe_serialization=None): return False @property def is_trainable(self): return False
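
# A small sketch of the suffix remapping performed by `create_quantized_param` above: the last
# component of a checkpoint key is swapped for the corresponding Quark module path. The key below
# is a hypothetical example.
if __name__ == "__main__":
    name = "model.layers.0.self_attn.q_proj.weight_scale"  # hypothetical checkpoint key
    postfix = name.split(".")[-1]
    if postfix in CHECKPOINT_KEYS:
        name = name.replace(postfix, CHECKPOINT_KEYS[postfix])
    print(name)  # -> model.layers.0.self_attn.q_proj.weight_quantizer.scale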
transformers/src/transformers/quantizers/quantizer_quark.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_quark.py", "repo_id": "transformers", "token_count": 1415 }
551
# Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. """ import contextlib import copy import functools import glob import importlib.metadata import inspect import json import math import os import random import re import shutil import sys import tempfile import time import warnings from collections.abc import Iterator, Mapping from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Optional, Union # Integrations must be imported before ML frameworks: # ruff: isort: off from .integrations import ( get_reporting_integration_callbacks, ) # ruff: isort: on import huggingface_hub.utils as hf_hub_utils import numpy as np import torch import torch.distributed as dist from huggingface_hub import ModelCard, create_repo, upload_folder from packaging import version from torch import nn from torch.utils.data import DataLoader, Dataset, IterableDataset, RandomSampler, SequentialSampler from . import __version__ from .configuration_utils import PretrainedConfig from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator from .debug_utils import DebugOption, DebugUnderflowOverflow from .feature_extraction_sequence_utils import SequenceFeatureExtractor from .feature_extraction_utils import FeatureExtractionMixin from .hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS, default_hp_search_backend from .image_processing_utils import BaseImageProcessor from .integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint, is_deepspeed_available from .integrations.tpu import tpu_spmd_dataloader from .modelcard import TrainingSummary from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model from .models.auto.modeling_auto import ( MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from .optimization import Adafactor, get_scheduler from .processing_utils import ProcessorMixin from .pytorch_utils import ( is_torch_greater_or_equal_than_2_3, ) from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( CallbackHandler, DefaultFlowCallback, ExportableState, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState, ) from .trainer_pt_utils import ( DistributedTensorGatherer, EvalLoopContainer, IterableDatasetShard, LabelSmoother, LayerWiseDummyOptimizer, LengthGroupedSampler, SequentialDistributedSampler, distributed_broadcast_scalars, distributed_concat, find_batch_size, get_model_param_count, get_module_class_from_name, get_parameter_names, nested_concat, nested_detach, nested_numpify, nested_xla_mesh_reduce, reissue_pt_warnings, remove_dummy_checkpoint, set_rng_state_for_device, ) from .trainer_utils import ( PREFIX_CHECKPOINT_DIR, BestRun, EvalLoopOutput, EvalPrediction, HPSearchBackend, HubStrategy, PredictionOutput, RemoveColumnsCollator, SaveStrategy, TrainerMemoryTracker, TrainOutput, 
check_target_module_exists, default_compute_objective, denumpify_detensorize, enable_full_determinism, find_executable_batch_size, get_last_checkpoint, has_length, neftune_post_forward_hook, number_of_arguments, seed_worker, set_seed, speed_metrics, ) from .training_args import OptimizerNames, ParallelMode, TrainingArguments from .utils import ( ADAPTER_CONFIG_NAME, ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, XLA_FSDPV2_MIN_VERSION, PushInProgress, PushToHubMixin, can_return_loss, check_torch_load_is_safe, find_labels, is_accelerate_available, is_apollo_torch_available, is_bitsandbytes_available, is_datasets_available, is_galore_torch_available, is_grokadamw_available, is_in_notebook, is_liger_kernel_available, is_lomo_available, is_peft_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_schedulefree_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_optimi_available, is_torch_xla_available, is_torch_xpu_available, is_torchao_available, logging, strtobool, ) from .utils.deprecation import deprecate_kwarg from .utils.import_utils import requires from .utils.quantization_config import QuantizationMethod DEFAULT_CALLBACKS = [DefaultFlowCallback] DEFAULT_PROGRESS_CALLBACK = ProgressCallback if is_in_notebook(): from .utils.notebook import NotebookProgressCallback DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback if is_datasets_available(): import datasets if is_torch_xla_available(): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.runtime as xr from torch_xla import __version__ as XLA_VERSION IS_XLA_FSDPV2_POST_2_2 = version.parse(XLA_VERSION) >= version.parse(XLA_FSDPV2_MIN_VERSION) if IS_XLA_FSDPV2_POST_2_2: import torch_xla.distributed.spmd as xs else: IS_XLA_FSDPV2_POST_2_2 = False if is_sagemaker_mp_enabled(): import smdistributed.modelparallel.torch as smp from smdistributed.modelparallel import __version__ as SMP_VERSION IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): import safetensors.torch if is_peft_available(): from peft import PeftModel if is_accelerate_available(): from accelerate import Accelerator, skip_first_batches from accelerate import __version__ as accelerate_version from accelerate.state import AcceleratorState from accelerate.utils import ( AutocastKwargs, DistributedDataParallelKwargs, DistributedType, load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer, ) DATA_SAMPLERS = [RandomSampler] if version.parse(accelerate_version) > version.parse("1.3.0"): from accelerate.utils import TorchTensorParallelPlugin if version.parse(accelerate_version) > version.parse("0.23.0"): from accelerate.data_loader import SeedableRandomSampler DATA_SAMPLERS += [SeedableRandomSampler] if is_deepspeed_available(): from accelerate.utils import DeepSpeedSchedulerWrapper if is_accelerate_available("0.28.0"): from accelerate.utils import DataLoaderConfiguration def _is_peft_model(model): if is_peft_available(): classes_to_check = (PeftModel,) # Here we also check if the model is an instance of `PeftMixedModel` introduced in peft>=0.7.0: 
https://github.com/huggingface/transformers/pull/28321 if version.parse(importlib.metadata.version("peft")) >= version.parse("0.7.0"): from peft import PeftMixedModel classes_to_check = (*classes_to_check, PeftMixedModel) return isinstance(model, classes_to_check) return False def _get_fsdp_ckpt_kwargs(): # TODO: @AjayP13, @younesbelkada replace this check with version check at the next `accelerate` release if is_accelerate_available() and "adapter_only" in list(inspect.signature(save_fsdp_model).parameters): return {"adapter_only": True} else: return {} def safe_globals(): # Starting from version 2.4 PyTorch introduces a check for the objects loaded # with torch.load(weights_only=True). Starting from 2.6 weights_only=True becomes # a default and requires allowlisting of objects being loaded. # See: https://github.com/pytorch/pytorch/pull/137602 # See: https://pytorch.org/docs/stable/notes/serialization.html#torch.serialization.add_safe_globals # See: https://github.com/huggingface/accelerate/pull/3036 if version.parse(torch.__version__).release < version.parse("2.6").release: return contextlib.nullcontext() np_core = np._core if version.parse(np.__version__) >= version.parse("2.0.0") else np.core allowlist = [np_core.multiarray._reconstruct, np.ndarray, np.dtype] # numpy >1.25 defines numpy.dtypes.UInt32DType, but below works for # all versions of numpy allowlist += [type(np.dtype(np.uint32))] return torch.serialization.safe_globals(allowlist) if TYPE_CHECKING: import optuna if is_datasets_available(): import datasets logger = logging.get_logger(__name__) # Name of the files used for checkpointing TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" SCALER_NAME = "scaler.pt" OPTIMIZER_NAME_BIN = "optimizer.bin" SCHEDULER_NAME = "scheduler.pt" FSDP_MODEL_NAME = "pytorch_model_fsdp" @requires( backends=( "torch", "accelerate", ) ) class Trainer: """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. Args: model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. <Tip> [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers models. </Tip> args ([`TrainingArguments`], *optional*): The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. data_collator (`DataCollator`, *optional*): The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will default to [`default_data_collator`] if no `processing_class` is provided, an instance of [`DataCollatorWithPadding`] otherwise if the processing_class is a feature extractor or tokenizer. train_dataset (Union[`torch.utils.data.Dataset`, `torch.utils.data.IterableDataset`, `datasets.Dataset`], *optional*): The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. 
            Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
            distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
            `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
            manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
            sets the seed of the RNGs used.
        eval_dataset (Union[`torch.utils.data.Dataset`, dict[str, `torch.utils.data.Dataset`], `datasets.Dataset`], *optional*):
            The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
            `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
            dataset prepending the dictionary key to the metric name.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model. This supersedes the `tokenizer` argument, which is now deprecated.
        model_init (`Callable[[], PreTrainedModel]`, *optional*):
            A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
            from a new instance of the model as given by this function.

            The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object,
            to be able to choose different architectures according to hyperparameters (such as layer count, sizes of
            inner layers, dropout probabilities, etc.).
        compute_loss_func (`Callable`, *optional*):
            A function that accepts the raw model outputs, labels, and the number of items in the entire accumulated
            batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the default [loss
            function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618)
            used by [`Trainer`].
        compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
            The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return
            a dictionary string to metric values. *Note* When passing TrainingArguments with `batch_eval_metrics` set
            to `True`, your compute_metrics function must take a boolean `compute_result` argument. This will be
            triggered after the last eval batch to signal that the function needs to calculate and return the global
            summary statistics rather than accumulating the batch-level statistics.
        callbacks (List of [`TrainerCallback`], *optional*):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed [here](callback).

            If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on
            your model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
        optimizer_cls_and_kwargs (`tuple[Type[torch.optim.Optimizer], dict[str, Any]]`, *optional*):
            A tuple containing the optimizer class and keyword arguments to use. Overrides `optim` and `optim_args`
            in `args`. Incompatible with the `optimizers` argument.
            Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices
            before initializing the Trainer.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
            A function that preprocesses the logits right before caching them at each evaluation step. Must take two
            tensors, the logits and the labels, and return the logits once processed as desired. The modifications
            made by this function will be reflected in the predictions received by `compute_metrics`.

            Note that the labels (second parameter) will be `None` if the dataset does not have them.

    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a
          [`PreTrainedModel`] subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
          the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
          model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
          to `False` if model parallel or deepspeed is used, or if the default
          `TrainingArguments.place_model_on_device` is overridden to return `False`.
        - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
          in `train`)
    """

    # Those are used as methods of the Trainer in examples.
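    # (Functions imported at class scope become bound methods and receive `self` like any `def`.)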
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state @deprecate_kwarg("tokenizer", new_name="processing_class", version="5.0.0", raise_if_both_names=True) def __init__( self, model: Union[PreTrainedModel, nn.Module, None] = None, args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Union[Dataset, IterableDataset, "datasets.Dataset"]] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset], "datasets.Dataset"]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_loss_func: Optional[Callable] = None, compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None), optimizer_cls_and_kwargs: Optional[tuple[type[torch.optim.Optimizer], dict[str, Any]]] = None, preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ): if args is None: output_dir = "tmp_trainer" logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.") args = TrainingArguments(output_dir=output_dir) if args.batch_eval_metrics and compute_metrics is not None: if "compute_result" not in inspect.signature(compute_metrics).parameters: raise ValueError( "When using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result`" " boolean argument which will be triggered after the last batch of the eval set to signal that the" " summary statistics should be returned by the function." ) if args.eval_strategy is not None and args.eval_strategy != "no" and eval_dataset is None: raise ValueError( f"You have set `args.eval_strategy` to {args.eval_strategy} but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. " ) if args.save_strategy == SaveStrategy.BEST or args.load_best_model_at_end: if args.metric_for_best_model is None: raise ValueError( "`args.metric_for_best_model` must be provided when using 'best' save_strategy or if `args.load_best_model_at_end` is set to `True`." ) self.args = args self.compute_loss_func = compute_loss_func # Seed must be set before instantiating the model when using model enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.hp_name = None self.deepspeed = None self.is_in_train = False self.model = model self.create_accelerator_and_postprocess() # memory metrics - must set up as early as possible self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() # set the correct log level depending on the node log_level = args.get_process_log_level() logging.set_verbosity(log_level) # force device and distributed setup init explicitly args._setup_devices if model is None: if model_init is not None: self.model_init = model_init model = self.call_model_init() else: raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument") else: if model_init is not None: warnings.warn( "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will" " overwrite your model when calling the `train` method. 
This will become a fatal error in the next" " release.", FutureWarning, ) self.model_init = model_init if model.__class__.__name__ in MODEL_MAPPING_NAMES: raise ValueError( f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only " "computes hidden states and does not accept any labels. You should choose a model with a head " "suitable for your task like any of the `AutoModelForXxx` listed at " "https://huggingface.co/docs/transformers/model_doc/auto" ) if getattr(model, "is_parallelizable", False) and getattr(model, "model_parallel", False): self.is_model_parallel = True else: self.is_model_parallel = False if getattr(model, "hf_device_map", None) is not None: devices = [device for device in set(model.hf_device_map.values()) if device not in ["cpu", "disk"]] if len(devices) > 1: self.is_model_parallel = True elif len(devices) == 1: self.is_model_parallel = self.args.device != torch.device(devices[0]) else: self.is_model_parallel = False # warn users if self.is_model_parallel: logger.info( "You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set" " to `True` to avoid any unexpected behavior such as device placement mismatching." ) if self.args.use_liger_kernel: if is_liger_kernel_available(): from liger_kernel.transformers import _apply_liger_kernel_to_instance # Prepare kernel config - use provided config or default (empty dict for default behavior) kernel_config = self.args.liger_kernel_config if self.args.liger_kernel_config is not None else {} if isinstance(model, PreTrainedModel): # Patch the model with liger kernels. Use the specified or default kernel configurations. _apply_liger_kernel_to_instance(model=model, **kernel_config) elif hasattr(model, "get_base_model") and isinstance(model.get_base_model(), PreTrainedModel): # Patch the base model with liger kernels where model is a PeftModel. Use the specified or default kernel configurations. _apply_liger_kernel_to_instance(model=model.get_base_model(), **kernel_config) else: logger.warning( "The model is not an instance of PreTrainedModel. No liger kernels will be applied." ) else: raise ImportError( "You have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. " "Please install it with `pip install liger-kernel`" ) _is_quantized_and_base_model = getattr(model, "is_quantized", False) and not getattr( model, "_hf_peft_config_loaded", False ) _quantization_method_supports_training = ( getattr(model, "hf_quantizer", None) is not None and model.hf_quantizer.is_trainable ) _is_model_quantized_and_qat_trainable = getattr(model, "hf_quantizer", None) is not None and getattr( model.hf_quantizer, "is_qat_trainable", False ) # Filter out quantized + compiled models if _is_quantized_and_base_model and hasattr(model, "_orig_mod"): raise ValueError( "You cannot fine-tune quantized model with `torch.compile()` make sure to pass a non-compiled model when fine-tuning a quantized model with PEFT" ) # At this stage the model is already loaded if _is_quantized_and_base_model and not _is_peft_model(model) and not _is_model_quantized_and_qat_trainable: raise ValueError( "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of" " the quantized model to correctly perform fine-tuning. 
Please see: https://huggingface.co/docs/transformers/peft"
                " for more details"
            )
        elif _is_quantized_and_base_model and not _quantization_method_supports_training:
            raise ValueError(
                f"The model you are trying to fine-tune is quantized with {model.hf_quantizer.quantization_config.quant_method}"
                " but that quantization method does not support training. Please open an issue on GitHub: https://github.com/huggingface/transformers"
                f" to request training support for {model.hf_quantizer.quantization_config.quant_method}"
            )

        self.is_fsdp_xla_enabled = args.fsdp_config["xla"]
        if len(args.fsdp) > 0:
            if self.is_deepspeed_enabled:
                raise ValueError(
                    "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
                )
            if not args.fsdp_config["xla"] and args.parallel_mode != ParallelMode.DISTRIBUTED:
                raise ValueError("Using fsdp only works in distributed training.")

        # one place to sort out whether to place the model on device or not
        # postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a much bigger than 1 gpu model
        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
        #    and we only use deepspeed for training at the moment
        # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
        # 4. FSDP - same as MP
        self.place_model_on_device = args.place_model_on_device
        if (
            self.is_model_parallel
            or self.is_deepspeed_enabled
            or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
            or self.is_fsdp_xla_enabled
            or self.is_fsdp_enabled
        ):
            self.place_model_on_device = False

        default_collator = (
            DataCollatorWithPadding(processing_class)
            if processing_class is not None
            and isinstance(processing_class, (PreTrainedTokenizerBase, SequenceFeatureExtractor))
            else default_data_collator
        )
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.processing_class = processing_class

        # bitsandbytes-quantized models don't support the `.to` operation.
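        # (bitsandbytes ties quantized weights to the device they were loaded on, and calling
        # `.to(device)` on such a model raises, so the explicit move below is skipped for it.)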
if ( self.place_model_on_device and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES ): self._move_model_to_device(model, args.device) # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs if self.is_model_parallel: self.args._n_gpu = 1 # later use `self.model is self.model_wrapped` to check if it's wrapped or not self.model_wrapped = model self.model = model # Just in case the model was wrapped outside of the `Trainer` unwrapped_model = self.accelerator.unwrap_model(model) # We also unwrap peft model if _is_peft_model(unwrapped_model): if hasattr(unwrapped_model, "get_base_model"): unwrapped_model = unwrapped_model.get_base_model() elif hasattr(unwrapped_model, "base_model") and hasattr(unwrapped_model.base_model, "model"): unwrapped_model = unwrapped_model.base_model.model else: raise AttributeError("Cannot extract base model safely from this PEFT wrapper.") # Check if the model has explicit setup for loss kwargs, # if not, check if `**kwargs` are in model.forward if hasattr(unwrapped_model, "accepts_loss_kwargs"): self.model_accepts_loss_kwargs = unwrapped_model.accepts_loss_kwargs else: forward_params = inspect.signature(unwrapped_model.forward).parameters self.model_accepts_loss_kwargs = any( k.kind == inspect.Parameter.VAR_KEYWORD for k in forward_params.values() ) self.neftune_noise_alpha = args.neftune_noise_alpha self.compute_metrics = compute_metrics self.preprocess_logits_for_metrics = preprocess_logits_for_metrics self.optimizer, self.lr_scheduler = optimizers self.optimizer_cls_and_kwargs = optimizer_cls_and_kwargs if self.optimizer_cls_and_kwargs is not None and self.optimizer is not None: raise RuntimeError("Passing both `optimizers` and `optimizer_cls_and_kwargs` arguments is incompatible.") if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError( "Passing a `model_init` is incompatible with providing the `optimizers` argument. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) if is_torch_xla_available() and self.optimizer is not None: for param in self.model.parameters(): model_device = param.device break for param_group in self.optimizer.param_groups: if len(param_group["params"]) > 0: optimizer_device = param_group["params"][0].device break if model_device != optimizer_device: raise ValueError( "The model and the optimizer parameters are not on the same device, which probably means you" " created an optimizer around your model **before** putting on the device and passing it to the" " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." ) if (self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and ( self.optimizer is not None or self.lr_scheduler is not None ): raise RuntimeError( "Passing `optimizers` is not allowed if PyTorch FSDP is enabled. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler( callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler ) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. 
        self._loggers_initialized = False

        # Create distant repo and output directory if needed
        self.hub_model_id = None
        if self.args.push_to_hub:
            self.init_hf_repo()
        if self.args.should_save:
            os.makedirs(self.args.output_dir, exist_ok=True)

        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            raise TypeError("The `data_collator` should be a simple callable (function, class with `__call__`).")

        if args.max_steps > 0 and args.num_train_epochs > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")

        if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
            raise ValueError(
                "The train_dataset does not implement __len__, max_steps has to be specified. "
                "The number of steps needs to be known in advance for the learning rate scheduler."
            )

        if (
            train_dataset is not None
            and isinstance(train_dataset, torch.utils.data.IterableDataset)
            and args.group_by_length
        ):
            raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset`")

        self._signature_columns = None

        # Mixed precision setup
        self.use_apex = False
        self.use_cpu_amp = False

        # Mixed precision setup for SageMaker Model Parallel
        if is_sagemaker_mp_enabled():
            # BF16 + model parallelism in SageMaker: currently not supported, raise an error
            if args.bf16:
                raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead.")

            if IS_SAGEMAKER_MP_POST_1_10:
                # When there's a mismatch between SMP config and trainer argument, use SMP config as truth
                if args.fp16 != smp.state.cfg.fp16:
                    logger.warning(
                        f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
                        f"but FP16 provided in trainer argument is {args.fp16}, "
                        f"setting to {smp.state.cfg.fp16}"
                    )
                    args.fp16 = smp.state.cfg.fp16
            else:
                # smp < 1.10 does not support fp16 in trainer.
                if hasattr(smp.state.cfg, "fp16"):
                    logger.warning(
                        f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
                        "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
) if (args.fp16 or args.bf16) and args.half_precision_backend == "auto": if args.device == torch.device("cpu"): if args.fp16: if not is_torch_greater_or_equal_than_2_3: raise ValueError("Tried to use `fp16` but it is not supported on cpu") else: args.half_precision_backend = "cpu_amp" logger.info(f"Using {args.half_precision_backend} half precision backend") if (args.fp16 or args.bf16) and not (self.is_deepspeed_enabled or is_sagemaker_mp_enabled()): # deepspeed and SageMaker Model Parallel manage their own half precision if args.half_precision_backend == "cpu_amp": self.use_cpu_amp = True self.amp_dtype = torch.bfloat16 elif args.half_precision_backend == "apex": self.use_apex = True # Label smoothing if self.args.label_smoothing_factor != 0: self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor) else: self.label_smoother = None self.control = TrainerControl() self.state = TrainerState( is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), stateful_callbacks=[ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ], ) # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then # returned to 0 every time flos need to be logged self.current_flos = 0 self.hp_search_backend = None model_to_inspect = self.model if _is_peft_model(self.model): if hasattr(self.model, "get_base_model"): model_to_inspect = self.model.get_base_model() else: # PeftMixedModel do not provide a `get_base_model` method model_to_inspect = self.model.base_model.model default_label_names = find_labels(model_to_inspect.__class__) self.label_names = default_label_names if self.args.label_names is None else self.args.label_names self.can_return_loss = can_return_loss(model_to_inspect.__class__) self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) # Internal variables to help with automatic batch size reduction self._train_batch_size = args.train_batch_size self._created_lr_scheduler = False # very last self._memory_tracker.stop_and_update_metrics() self.is_fsdp_xla_v2_enabled = args.fsdp_config.get("xla_fsdp_v2", False) if self.is_fsdp_xla_v2_enabled: if not IS_XLA_FSDPV2_POST_2_2: raise ValueError("FSDPv2 requires `torch_xla` 2.2 or higher.") # Prepare the SPMD mesh that is going to be used by the data loader and the FSDPv2 wrapper. # Tensor axis is just a placeholder where it will not be used in FSDPv2. num_devices = xr.global_runtime_device_count() xs.set_global_mesh(xs.Mesh(np.array(range(num_devices)), (num_devices, 1), axis_names=("fsdp", "tensor"))) self.is_fsdp_xla_v1_enabled = self.is_fsdp_xla_enabled and not self.is_fsdp_xla_v2_enabled @property def tokenizer(self) -> Optional[PreTrainedTokenizerBase]: logger.warning("Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.") return self.processing_class @tokenizer.setter def tokenizer(self, processing_class) -> None: logger.warning( "Trainer.tokenizer is now deprecated. You should use `Trainer.processing_class = processing_class` instead." 
        )
        self.processing_class = processing_class

    def _activate_neftune(self, model):
        r"""
        Activates NEFTune, as presented in this code: https://github.com/neelsjain/NEFTune and paper:
        https://huggingface.co/papers/2310.05914
        """
        unwrapped_model = self.accelerator.unwrap_model(model)
        if _is_peft_model(unwrapped_model):
            embeddings = unwrapped_model.base_model.model.get_input_embeddings()
        else:
            embeddings = unwrapped_model.get_input_embeddings()

        del unwrapped_model

        embeddings.neftune_noise_alpha = self.neftune_noise_alpha
        hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook)
        self.neftune_hook_handle = hook_handle
        return model

    def _deactivate_neftune(self, model):
        """
        Deactivates the NEFTune method. Make sure to call `_activate_neftune` first.
        """
        if not hasattr(self, "neftune_hook_handle"):
            raise ValueError("NEFTune is not activated, make sure to call `trainer._activate_neftune()` first")

        unwrapped_model = self.accelerator.unwrap_model(model)

        if _is_peft_model(unwrapped_model):
            embeddings = unwrapped_model.base_model.model.get_input_embeddings()
        else:
            embeddings = unwrapped_model.get_input_embeddings()

        self.neftune_hook_handle.remove()
        del embeddings.neftune_noise_alpha, unwrapped_model

    def add_callback(self, callback):
        """
        Add a callback to the current list of [`~transformers.TrainerCallback`].

        Args:
            callback (`type` or [`~transformers.TrainerCallback`]):
                A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In
                the first case, will instantiate a member of that class.
        """
        self.callback_handler.add_callback(callback)

    def pop_callback(self, callback):
        """
        Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.

        If the callback is not found, returns `None` (and no error is raised).

        Args:
            callback (`type` or [`~transformers.TrainerCallback`]):
                A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In
                the first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            [`~transformers.TrainerCallback`]: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of [`~transformers.TrainerCallback`].

        Args:
            callback (`type` or [`~transformers.TrainerCallback`]):
                A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In
                the first case, will remove the first member of that class found in the list of callbacks.
        """
        self.callback_handler.remove_callback(callback)

    def _move_model_to_device(self, model, device):
        model = model.to(device)
        # Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
        if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
            model.tie_weights()

    def _align_special_tokens(self):
        """
        Aligns the special tokens of the tokenizer with the model configs. New tokens may be defined in the tokenizer
        for fine-tuning purposes, e.g. an "end of turn" token may be added on chat models. In that case, we want the
        model configs to be aligned with the tokenizer, so that all downstream uses work as expected. This alignment
        should happen before training, to ensure the prediction step uses the new tokens as well.
""" if isinstance(self.processing_class, ProcessorMixin): tokenizer = self.processing_class.tokenizer else: tokenizer = self.processing_class model_has_generation_config = ( hasattr(self.model, "generation_config") and self.model.generation_config is not None ) updated_tokens = {} # 1 - Align EOS token. EOS is more complex than the others, as `generation_config` may hold more than one EOS # token. tokenizer_has_new_eos = tokenizer.eos_token_id != self.model.config.eos_token_id if model_has_generation_config: # `generation_config.eos_token_id` is None: direct comparison if self.model.generation_config.eos_token_id is None: tokenizer_has_new_eos |= tokenizer.eos_token_id != self.model.generation_config.eos_token_id else: # `generation_config.eos_token_id` is an `int`: convert it to list (and continue below) if isinstance(self.model.generation_config.eos_token_id, int): self.model.generation_config.eos_token_id = [self.model.generation_config.eos_token_id] # `generation_config.eos_token_id` is a `list`: check if the tokenizer's EOS token is in the list tokenizer_has_new_eos |= tokenizer.eos_token_id not in self.model.generation_config.eos_token_id if tokenizer_has_new_eos: updated_tokens["eos_token_id"] = tokenizer.eos_token_id self.model.config.eos_token_id = tokenizer.eos_token_id # The generation config may hold more than one EOS token. We preserve the original EOS tokens: any of the # EOS tokens defined here will halt generation. if model_has_generation_config: all_eos_tokens = [tokenizer.eos_token_id] if self.model.generation_config.eos_token_id is not None: all_eos_tokens += list(self.model.generation_config.eos_token_id) self.model.generation_config.eos_token_id = [token for token in all_eos_tokens if token is not None] # 2 - Align BOS tokenizer_has_new_bos = tokenizer.bos_token_id != self.model.config.bos_token_id if model_has_generation_config: tokenizer_has_new_bos |= tokenizer.bos_token_id != self.model.generation_config.bos_token_id if tokenizer_has_new_bos: updated_tokens["bos_token_id"] = tokenizer.bos_token_id self.model.config.bos_token_id = tokenizer.bos_token_id if model_has_generation_config: self.model.generation_config.bos_token_id = tokenizer.bos_token_id # 3 - Align PAD tokenizer_has_new_pad = tokenizer.pad_token_id != self.model.config.pad_token_id if model_has_generation_config: tokenizer_has_new_pad |= tokenizer.pad_token_id != self.model.generation_config.pad_token_id if tokenizer_has_new_pad: updated_tokens["pad_token_id"] = tokenizer.pad_token_id self.model.config.pad_token_id = tokenizer.pad_token_id if model_has_generation_config: self.model.generation_config.pad_token_id = tokenizer.pad_token_id # 4 - Warn users about the changes if len(updated_tokens) > 0: logger.warning( "The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config. " "The model config and generation config were aligned accordingly, being updated with the tokenizer's " f"values. Updated tokens: {updated_tokens}." ) def _set_signature_columns_if_needed(self): if self._signature_columns is None: # Inspect model forward signature to keep only the arguments it accepts. 
            model_to_inspect = self.model
            if _is_peft_model(self.model):
                if hasattr(self.model, "get_base_model"):
                    model_to_inspect = self.model.get_base_model()
                else:
                    # PeftMixedModel does not provide a `get_base_model` method
                    model_to_inspect = self.model.base_model.model
            signature = inspect.signature(model_to_inspect.forward)
            self._signature_columns = list(signature.parameters.keys())
            # Labels may be named label or label_ids; the default data collator handles that.
            self._signature_columns += list(set(["label", "label_ids"] + self.label_names))

    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        if not self.args.remove_unused_columns:
            return dataset
        self._set_signature_columns_if_needed()
        signature_columns = self._signature_columns

        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
        if len(ignored_columns) > 0:
            dset_description = "" if description is None else f"in the {description} set"
            logger.info(
                f"The following columns {dset_description} don't have a corresponding argument in "
                f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}. "
                f"If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
                "you can safely ignore this message."
            )

        columns = [k for k in signature_columns if k in dataset.column_names]
        if len(columns) == 0:
            raise ValueError(
                f"No columns in the dataset match the model's forward method signature: ({', '.join(signature_columns)}). "
                f"The following columns have been ignored: [{', '.join(ignored_columns)}]. "
                "Please check the dataset and model. You may need to set `remove_unused_columns=False` in `TrainingArguments`."
            )

        if version.parse(datasets.__version__) < version.parse("1.4.0"):
            dataset.set_format(
                type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
            )
            return dataset
        else:
            return dataset.remove_columns(ignored_columns)

    def _get_collator_with_removed_columns(
        self, data_collator: Callable, description: Optional[str] = None
    ) -> Callable:
        """Wrap the data collator in a callable removing unused columns."""
        if not self.args.remove_unused_columns:
            return data_collator
        self._set_signature_columns_if_needed()
        signature_columns = self._signature_columns

        remove_columns_collator = RemoveColumnsCollator(
            data_collator=data_collator,
            signature_columns=signature_columns,
            logger=logger,
            description=description,
            model_name=self.model.__class__.__name__,
        )
        return remove_columns_collator

    def _get_train_sampler(self, train_dataset: Optional[Dataset] = None) -> Optional[torch.utils.data.Sampler]:
        if train_dataset is None:
            train_dataset = self.train_dataset
        if train_dataset is None or not has_length(train_dataset):
            return None

        # Build the sampler.
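        # With `group_by_length=True`, samples of similar length are batched together to minimize padding;
        # lengths come from `length_column_name` when present, otherwise the sampler infers them from the inputs.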
if self.args.group_by_length: if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): lengths = ( train_dataset[self.args.length_column_name] if self.args.length_column_name in train_dataset.column_names else None ) else: lengths = None model_input_name = ( self.processing_class.model_input_names[0] if self.processing_class is not None else None ) return LengthGroupedSampler( self.args.train_batch_size * self.args.gradient_accumulation_steps, dataset=train_dataset, lengths=lengths, model_input_name=model_input_name, ) else: return RandomSampler(train_dataset) def _get_dataloader( self, dataset: Dataset, description: str, batch_size: int, sampler_fn: Optional[Callable[[Dataset], torch.utils.data.Sampler]] = None, is_training: bool = False, dataloader_key: Optional[str] = None, ) -> DataLoader: """Create a [`~torch.utils.data.DataLoader`] from the given dataset.""" data_collator = self.data_collator if is_datasets_available() and isinstance(dataset, datasets.Dataset): dataset = self._remove_unused_columns(dataset, description=description) else: data_collator = self._get_collator_with_removed_columns(self.data_collator, description=description) dataloader_params = { "batch_size": batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(dataset, torch.utils.data.IterableDataset): if sampler_fn is not None: dataloader_params["sampler"] = sampler_fn(dataset) dataloader_params["drop_last"] = self.args.dataloader_drop_last dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor if is_training: dataloader_params["worker_init_fn"] = partial( seed_worker, num_workers=self.args.dataloader_num_workers, rank=self.args.process_index ) dataloader = self.accelerator.prepare(DataLoader(dataset, **dataloader_params)) # Store the prepared dataloader for subsequent evaluations if using persistent workers. if dataloader_key is not None and self.args.dataloader_persistent_workers: if hasattr(self, "_eval_dataloaders"): self._eval_dataloaders[dataloader_key] = dataloader else: self._eval_dataloaders = {dataloader_key: dataloader} return dataloader def get_train_dataloader(self) -> DataLoader: """ Returns the training [`~torch.utils.data.DataLoader`]. Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed training if necessary) otherwise. Subclass and override this method if you want to inject some custom behavior. """ if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") return self._get_dataloader( dataset=self.train_dataset, description="Training", batch_size=self._train_batch_size, sampler_fn=self._get_train_sampler, is_training=True, ) def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: if eval_dataset is None or not has_length(eval_dataset): return None # Build the sampler. 
        # Deprecated code
        if self.args.use_legacy_prediction_loop:
            if is_torch_xla_available():
                return SequentialDistributedSampler(
                    eval_dataset, num_replicas=xr.world_size(), rank=xr.global_ordinal()
                )
            elif is_sagemaker_mp_enabled():
                return SequentialDistributedSampler(
                    eval_dataset,
                    num_replicas=smp.dp_size(),
                    rank=smp.dp_rank(),
                    batch_size=self.args.per_device_eval_batch_size,
                )
            else:
                return SequentialSampler(eval_dataset)

        if self.args.group_by_length:
            if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
                lengths = (
                    eval_dataset[self.args.length_column_name]
                    if self.args.length_column_name in eval_dataset.column_names
                    else None
                )
            else:
                lengths = None
            model_input_name = (
                self.processing_class.model_input_names[0] if self.processing_class is not None else None
            )
            return LengthGroupedSampler(
                self.args.eval_batch_size,
                dataset=eval_dataset,
                lengths=lengths,
                model_input_name=model_input_name,
            )

        if self.args.world_size <= 1:
            return SequentialSampler(eval_dataset)
        else:
            return None

    def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]] = None) -> DataLoader:
        """
        Returns the evaluation [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*):
                If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`,
                will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`],
                columns not accepted by the `model.forward()` method are automatically removed.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")

        # With persistent dataloader workers, reuse the cached dataloader instead of spawning new worker processes
        # on every evaluation (a fork bomb), especially since eval datasets don't change during training.
        dataloader_key = eval_dataset if isinstance(eval_dataset, str) else "eval"
        if (
            hasattr(self, "_eval_dataloaders")
            and dataloader_key in self._eval_dataloaders
            and self.args.dataloader_persistent_workers
        ):
            return self._eval_dataloaders[dataloader_key]

        eval_dataset = (
            self.eval_dataset[eval_dataset]
            if isinstance(eval_dataset, str)
            else eval_dataset
            if eval_dataset is not None
            else self.eval_dataset
        )

        return self._get_dataloader(
            dataset=eval_dataset,
            description="Evaluation",
            batch_size=self.args.eval_batch_size,
            sampler_fn=self._get_eval_sampler,
            dataloader_key=dataloader_key,
        )

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """
        Returns the test [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (`torch.utils.data.Dataset`):
                The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
                `model.forward()` method are automatically removed. It must implement `__len__`.
        """
        return self._get_dataloader(
            dataset=test_dataset,
            description="Test",
            batch_size=self.args.eval_batch_size,
            sampler_fn=self._get_eval_sampler,
        )

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Set up the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in
        the Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
        `create_scheduler`) in a subclass.
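
        Example (a minimal sketch of supplying your own optimizer and scheduler instead of the defaults; `model`,
        `args` and `train_dataset` are assumed to be defined already):

        ```python
        >>> from torch.optim import AdamW
        >>> from torch.optim.lr_scheduler import CosineAnnealingLR

        >>> optimizer = AdamW(model.parameters(), lr=5e-5)
        >>> scheduler = CosineAnnealingLR(optimizer, T_max=1_000)
        >>> trainer = Trainer(
        ...     model=model, args=args, train_dataset=train_dataset, optimizers=(optimizer, scheduler)
        ... )
        ```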
""" self.create_optimizer() if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer optimizer = self.optimizer.optimizer else: optimizer = self.optimizer self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) def get_decay_parameter_names(self, model) -> list[str]: """ Get all parameter names that weight decay will be applied to. This function filters out parameters in two ways: 1. By layer type (instances of layers specified in ALL_LAYERNORM_LAYERS) 2. By parameter name patterns (containing 'bias', or variation of 'norm') """ forbidden_name_patterns = [r"bias", r"layernorm", r"rmsnorm", r"(?:^|\.)norm(?:$|\.)", r"_norm(?:$|\.)"] decay_parameters = get_parameter_names(model, [nn.LayerNorm], forbidden_name_patterns) return decay_parameters def create_optimizer(self): """ Setup the optimizer. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through `optimizers`, or subclass and override this method in a subclass. """ opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.optimizer is None: decay_parameters = self.get_decay_parameter_names(opt_model) optimizer_grouped_parameters = [ { "params": [ p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) ], "weight_decay": self.args.weight_decay, }, { "params": [ p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) ], "weight_decay": 0.0, }, ] if self.optimizer_cls_and_kwargs is not None: optimizer_cls, optimizer_kwargs = self.optimizer_cls_and_kwargs else: optimizer_cls, optimizer_kwargs = self.get_optimizer_cls_and_kwargs(self.args, opt_model) # Overwrite `params` in case it's created by `get_optimizer_cls_and_kwargs` # e.g. for GaLore optimizer. if "params" in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop("params") # Overwrite `model` in case it's created by `get_optimizer_cls_and_kwargs` # e.g. for LOMO optimizer. if "model" in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop("model") # For layer-wise dummy optimizers we overwrite optimizer_grouped_parameters with `optimizer_dict` # to avoid arguments conflicts. if "optimizer_dict" in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop("optimizer_dict") self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) if "bitsandbytes" in str(optimizer_cls) and optimizer_kwargs.get("optim_bits", None) == 8: import bitsandbytes manager = bitsandbytes.optim.GlobalOptimManager.get_instance() skipped = 0 for module in opt_model.modules(): if isinstance(module, nn.Embedding): skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) logger.info(f"skipped {module}: {skipped / 2**20}M params") manager.register_module_override(module, "weight", {"optim_bits": 32}) logger.debug(f"bitsandbytes: will optimize {module} in fp32") logger.info(f"skipped: {skipped / 2**20}M params") if is_sagemaker_mp_enabled(): self.optimizer = smp.DistributedOptimizer(self.optimizer) return self.optimizer def get_num_trainable_parameters(self): """ Get the number of trainable parameters. """ return sum(p.numel() for p in self.model.parameters() if p.requires_grad) def get_learning_rates(self): """ Returns the learning rate of each parameter from self.optimizer. 
""" if self.optimizer is None: raise ValueError("Trainer optimizer is None, please make sure you have setup the optimizer before.") return [group["lr"] for group in self.optimizer.param_groups] def get_optimizer_group(self, param: Optional[Union[str, torch.nn.parameter.Parameter]] = None): """ Returns optimizer group for a parameter if given, else returns all optimizer groups for params. Args: param (`str` or `torch.nn.parameter.Parameter`, *optional*): The parameter for which optimizer group needs to be returned. """ if self.optimizer is None: raise ValueError("Trainer optimizer is None, please make sure you have setup the optimizer before.") if param is not None: for group in self.optimizer.param_groups: if param in group["params"]: return group return [group["params"] for group in self.optimizer.param_groups] @staticmethod def get_optimizer_cls_and_kwargs( args: TrainingArguments, model: Optional[PreTrainedModel] = None ) -> tuple[Any, Any]: """ Returns the optimizer class and optimizer parameters based on the training arguments. Args: args (`transformers.training_args.TrainingArguments`): The training arguments for the training session. """ # parse args.optim_args optim_args = {} if args.optim_args: for mapping in args.optim_args.replace(" ", "").split(","): key, value = mapping.split("=") optim_args[key] = value optimizer_kwargs = {"lr": args.learning_rate} adam_kwargs = { "betas": (args.adam_beta1, args.adam_beta2), "eps": args.adam_epsilon, } def setup_low_rank_optimizer( optimizer_name: str, optimizer_mapping: dict[str, Any], optim_kwargs: dict[str, Any], is_layerwise_supported: bool = True, ) -> tuple[Any, Any]: """ Helper function to set up low-rank optimizers like GaLore and Apollo. Args: optimizer_name (str): Name of the optimizer. optimizer_mapping (dict): Mapping of optimizer names to their classes. optim_kwargs (dict): Keyword arguments for the optimizer. is_layerwise_supported (bool): Whether layerwise optimization is supported. Returns: tuple[Any, Any]: Optimizer class and updated optimizer kwargs. """ is_layerwise = optimizer_name.lower().endswith("layerwise") if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported: raise NotImplementedError(f"Layer-wise {optimizer_name} does not support DDP at this time") optimizer_cls = optimizer_mapping[optimizer_name] if args.optim_target_modules is None: raise ValueError(f"You need to define `optim_target_modules` to use {optimizer_name} optimizers") if not isinstance(args.optim_target_modules, (list, str)): raise TypeError( f"`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: {args.optim_target_modules}" ) if model is None: raise ValueError(f"You need to pass a model to initialize {optimizer_name} optimizer.") all_linear = ( isinstance(args.optim_target_modules, str) and args.optim_target_modules.replace("_", "-") == "all-linear" ) target_params_names = [] for module_name, module in model.named_modules(): target_module_exists, is_regex = check_target_module_exists( args.optim_target_modules, module_name, return_is_regex=True ) if not isinstance(module, nn.Linear): if target_module_exists and not is_regex: logger.warning( f"{module_name} matched but ignored. {optimizer_name} only supports linear layers." 
) continue if not target_module_exists and not all_linear: continue target_params_names.append(module_name + ".weight") if len(target_params_names) == 0: raise ValueError(f"No target modules found for {optimizer_name} ({args.optim_target_modules}).") target_params = [p for n, p in model.named_parameters() if n in target_params_names] non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names] optim_kwargs.update(optim_args) param_groups = [ {"params": non_target_params}, {"params": target_params, **optim_kwargs}, ] if is_layerwise: if args.gradient_accumulation_steps != 1: raise ValueError(f"Layerwise {optimizer_name} does not support gradient accumulation!") optimizer_dict = {} for param in non_target_params: optimizer_dict[param] = optimizer_cls([{"params": [param]}], **optimizer_kwargs) for param in target_params: optimizer_dict[param] = optimizer_cls([{"params": [param], **optim_kwargs}], **optimizer_kwargs) def optimizer_hook(param): if param.grad is not None: optimizer_dict[param].step() optimizer_dict[param].zero_grad() for param in model.parameters(): if param.requires_grad: param.register_post_accumulate_grad_hook(optimizer_hook) optimizer_cls = LayerWiseDummyOptimizer optimizer_kwargs.update({"optimizer_dict": optimizer_dict}) optimizer_kwargs.update({"params": param_groups}) return optimizer_cls, optimizer_kwargs if args.optim == OptimizerNames.ADAFACTOR: optimizer_cls = Adafactor optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: from torch.optim import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: optimizer_kwargs.update({"fused": True}) elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: try: from torch_xla.amp.syncfree import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.") elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED: try: from torch_npu.optim import NpuFusedAdamW optimizer_cls = NpuFusedAdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer failed to import FusedAdamW from torch_npu.") elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: try: from apex.optimizers import FusedAdam optimizer_cls = FusedAdam optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!") elif args.optim in [ OptimizerNames.ADAMW_BNB, OptimizerNames.ADAMW_8BIT, OptimizerNames.PAGED_ADAMW, OptimizerNames.PAGED_ADAMW_8BIT, OptimizerNames.ADEMAMIX, OptimizerNames.ADEMAMIX_8BIT, OptimizerNames.PAGED_ADEMAMIX, OptimizerNames.PAGED_ADEMAMIX_8BIT, OptimizerNames.LION, OptimizerNames.LION_8BIT, OptimizerNames.PAGED_LION, OptimizerNames.PAGED_LION_8BIT, OptimizerNames.RMSPROP_BNB, OptimizerNames.RMSPROP_8BIT, OptimizerNames.RMSPROP_32BIT, ]: try: from bitsandbytes.optim import AdamW, Lion, RMSprop is_paged = False optim_bits = 32 optimizer_cls = None additional_optim_kwargs = adam_kwargs if "paged" in args.optim: is_paged = True if "8bit" in args.optim: optim_bits = 8 if "adam" in args.optim: optimizer_cls = AdamW elif "lion" in args.optim: optimizer_cls = Lion additional_optim_kwargs = {"betas": (args.adam_beta1, args.adam_beta2)} elif "rmsprop" in args.optim: optimizer_cls = RMSprop # Above we pass all `adam_kwargs` to the optimizer, here # we only pass 
`optim_args` which can be passed by the user. additional_optim_kwargs = optim_args elif "ademamix" in args.optim: if is_bitsandbytes_available() and version.parse( importlib.metadata.version("bitsandbytes") ) < version.parse("0.44.0"): raise ValueError( "The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. " "Please install `bitsandbytes` >= 0.44.0." ) from bitsandbytes.optim import AdEMAMix optimizer_cls = AdEMAMix additional_optim_kwargs = { "betas": ( float(optim_args.get("beta1", args.adam_beta1)), float(optim_args.get("beta2", args.adam_beta2)), float(optim_args.get("beta3", 0.9999)), ), "alpha": float(optim_args.get("alpha", 5.0)), "eps": float(optim_args.get("eps", args.adam_epsilon)), } if "t_alpha" in optim_args: additional_optim_kwargs["t_alpha"] = int(optim_args["t_alpha"]) if "t_beta3" in optim_args: additional_optim_kwargs["t_beta3"] = int(optim_args["t_beta3"]) bnb_kwargs = {"optim_bits": optim_bits} if "rmsprop" not in args.optim: bnb_kwargs["is_paged"] = is_paged optimizer_kwargs.update(additional_optim_kwargs) optimizer_kwargs.update(bnb_kwargs) except ImportError: raise ValueError("Trainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!") if is_bitsandbytes_available() and version.parse( importlib.metadata.version("bitsandbytes") ) < version.parse("0.41.1"): logger.warning( "You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. " "It is recommended to update your version as a major bug has been fixed in 8-bit optimizers." ) elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: try: from torchdistx.optimizers import AnyPrecisionAdamW optimizer_cls = AnyPrecisionAdamW optimizer_kwargs.update(adam_kwargs) # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx. 
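                # As an illustration, these knobs can be set via `--optim_args`, e.g.
                # "use_kahan_summation=True,momentum_dtype=float32,variance_dtype=bfloat16" (example values).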
                optimizer_kwargs.update(
                    {
                        "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
                        "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
                        "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
                        "compensation_buffer_dtype": getattr(
                            torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
                        ),
                    }
                )
            except ImportError:
                raise ValueError("Please install `torchdistx` from https://github.com/pytorch/torchdistx")
        elif args.optim == OptimizerNames.SGD:
            optimizer_cls = torch.optim.SGD
        elif args.optim == OptimizerNames.ADAGRAD:
            optimizer_cls = torch.optim.Adagrad
        elif args.optim == OptimizerNames.RMSPROP:
            optimizer_cls = torch.optim.RMSprop
        elif args.optim in [
            OptimizerNames.GALORE_ADAMW,
            OptimizerNames.GALORE_ADAMW_8BIT,
            OptimizerNames.GALORE_ADAFACTOR,
            OptimizerNames.GALORE_ADAMW_LAYERWISE,
            OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE,
            OptimizerNames.GALORE_ADAFACTOR_LAYERWISE,
        ]:
            if not is_galore_torch_available():
                raise ImportError(
                    "You need to install `galore_torch` in order to use GaLore optimizers. "
                    "Install it with `pip install git+https://github.com/jiaweizzhao/GaLore`"
                )
            from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit

            optimizer_mapping = {
                OptimizerNames.GALORE_ADAMW: GaLoreAdamW,
                OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit,
                OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor,
                OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW,
                OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit,
                OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor,
            }

            galore_optim_kwargs = {
                "rank": int(optim_args.pop("rank", 128)),
                "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)),
                "scale": float(optim_args.pop("scale", 0.25)),
                "proj_type": optim_args.pop("proj_type", "std"),
            }

            optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(
                args.optim, optimizer_mapping, galore_optim_kwargs
            )

            if args.optim == OptimizerNames.GALORE_ADAFACTOR:
                optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
        elif args.optim in [
            OptimizerNames.APOLLO_ADAMW,
            OptimizerNames.APOLLO_ADAMW_LAYERWISE,
        ]:
            if not is_apollo_torch_available():
                raise ImportError(
                    "You need to install `apollo_torch` in order to use APOLLO optimizers. "
                    "Install it with `pip install git+https://github.com/zhuhanqing/APOLLO`"
                )
            from apollo_torch import APOLLOAdamW

            optimizer_mapping = {
                OptimizerNames.APOLLO_ADAMW: APOLLOAdamW,
                OptimizerNames.APOLLO_ADAMW_LAYERWISE: APOLLOAdamW,
            }

            apollo_optim_kwargs = {
                "rank": int(optim_args.pop("rank", 128)),
                "proj": optim_args.pop("proj", "random"),
                "scale_type": optim_args.pop("scale_type", "channel"),
                "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)),
                "scale": float(optim_args.pop("scale", 1.0)),
                "proj_type": optim_args.pop("proj_type", "std"),
            }
            apollo_optim_kwargs.update(adam_kwargs)

            optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(
                args.optim, optimizer_mapping, apollo_optim_kwargs
            )
        elif args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            if not is_lomo_available():
                raise ImportError(
                    "You need to install `lomo_optim` in order to use LOMO optimizers. "
                    "Install it with `pip install lomo-optim`"
                )
            if not is_accelerate_available("0.30.0"):
                raise ImportError("You need to have `accelerate>=0.30.0` to be able to use LOMO optimizers")

            if model is None:
                raise ValueError("You need to pass a `model` in order to correctly initialize a LOMO optimizer.")

            from lomo_optim import AdaLomo, Lomo

            if "ada" in args.optim:
                optimizer_cls = AdaLomo
            else:
                optimizer_cls = Lomo
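            # Note: LOMO fuses the gradient computation and the parameter update into the backward pass to save
            # memory, which is why the optimizer needs a handle to the model itself (see below).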
            optimizer_kwargs.update({"model": model})
        elif args.optim == OptimizerNames.GROKADAMW:
            if not is_grokadamw_available():
                raise ValueError("Please install grokadamw with `pip install grokadamw`")
            from grokadamw import GrokAdamW

            optimizer_cls = GrokAdamW
            optimizer_kwargs.update(
                {
                    "alpha_init": float(optim_args.get("alpha_init", 0.98)),
                    "lamb": float(optim_args.get("lamb", 2.0)),
                    "gamma": float(optim_args.get("gamma", 0.1)),
                    "grokking_signal_decay_rate": float(optim_args.get("grokking_signal_decay_rate", 0.1)),
                    "gradient_clipping": float(optim_args.get("gradient_clipping", 1.0)),
                }
            )
        elif args.optim in [
            OptimizerNames.ADAMW_TORCH_4BIT,
            OptimizerNames.ADAMW_TORCH_8BIT,
        ]:
            if not is_torchao_available() or version.parse(importlib.metadata.version("torchao")) < version.parse(
                "0.4.0"
            ):
                raise ImportError(
                    "You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers. "
                    "Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/ao"
                )
            if version.parse(importlib.metadata.version("torch")) <= version.parse("2.4"):
                raise ImportError(
                    "You need to have `torch>2.4` in order to use torch 4-bit optimizers. "
                    "Install it with `pip install --upgrade torch`; it is available on PyPI. Otherwise, you need to install a torch nightly build."
                )
            if version.parse(importlib.metadata.version("torchao")) >= version.parse("0.11.0"):
                # https://github.com/pytorch/ao/pull/2159
                from torchao.optim import AdamW4bit, AdamW8bit
            else:
                from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit

            if args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
                optimizer_cls = AdamW4bit
            elif args.optim == OptimizerNames.ADAMW_TORCH_8BIT:
                optimizer_cls = AdamW8bit
            else:
                raise ValueError("Invalid optimizer")
            optimizer_kwargs.update(adam_kwargs)
        elif args.optim in [
            OptimizerNames.SCHEDULE_FREE_RADAM,
            OptimizerNames.SCHEDULE_FREE_ADAMW,
            OptimizerNames.SCHEDULE_FREE_SGD,
        ]:
            if not is_schedulefree_available():
                raise ImportError(
                    "You need to install `schedulefree` in order to use schedulefree optimizers. "
                    "Install it with `pip install schedulefree`."
                )
            if not is_accelerate_available("0.30.0"):
                raise ImportError("You need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers")
            from schedulefree import AdamWScheduleFree, SGDScheduleFree

            additional_optim_kwargs = {}
            require_warmup = True

            if args.optim == OptimizerNames.SCHEDULE_FREE_RADAM:
                if not is_schedulefree_available("1.4.0"):
                    raise ImportError(
                        "You need to install `schedulefree>=1.4.0` in order to use the RAdamScheduleFree optimizer. "
                        "Install it with `pip install schedulefree`."
                    )
                from schedulefree import RAdamScheduleFree

                optimizer_cls = RAdamScheduleFree
                additional_optim_kwargs = adam_kwargs
                require_warmup = False
            elif args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW:
                optimizer_cls = AdamWScheduleFree
                additional_optim_kwargs = adam_kwargs
            elif args.optim == OptimizerNames.SCHEDULE_FREE_SGD:
                optimizer_cls = SGDScheduleFree
            else:
                raise ValueError("Invalid schedulefree optimizer")

            additional_optim_kwargs["weight_decay"] = args.weight_decay
            if require_warmup:
                additional_optim_kwargs["warmup_steps"] = args.warmup_steps
            additional_optim_kwargs.update(
                {
                    "weight_lr_power": float(optim_args.get("weight_lr_power", 2.0)),
                    "r": float(optim_args.get("r", 0.0)),
                }
            )
            optimizer_kwargs.update(additional_optim_kwargs)
        elif args.optim == OptimizerNames.STABLE_ADAMW:
            if not is_torch_optimi_available():
                raise ImportError(
                    "You need to install `torch-optimi` in order to use stable_adamw optimizers. "
                    "Install it with `pip install torch-optimi`."
                )
            from optimi import StableAdamW

            max_lr = optim_args.pop("max_lr", None)
            if max_lr is not None:
                max_lr = float(max_lr)

            # `optim_args` values are strings, so use `strtobool` rather than `bool` (bool("False") is truthy).
            kahan_sum = optim_args.pop("kahan_sum", None)
            if kahan_sum is not None:
                kahan_sum = strtobool(kahan_sum)

            adam_kwargs["weight_decay"] = args.weight_decay
            stable_adamw_kwargs = {
                "decouple_lr": strtobool(optim_args.pop("decouple_lr", "False")),
                "max_lr": max_lr,
                "kahan_sum": kahan_sum,
            }

            optimizer_cls = StableAdamW
            optimizer_kwargs.update(adam_kwargs)
            optimizer_kwargs.update(stable_adamw_kwargs)
        else:
            raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
        return optimizer_cls, optimizer_kwargs

    def create_scheduler(self, num_training_steps: int, optimizer: Optional[torch.optim.Optimizer] = None):
        """
        Set up the scheduler. The optimizer of the trainer must have been set up either before this method is called
        or passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        """
        if self.lr_scheduler is None:
            self.lr_scheduler = get_scheduler(
                self.args.lr_scheduler_type,
                optimizer=self.optimizer if optimizer is None else optimizer,
                num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
                num_training_steps=num_training_steps,
                scheduler_specific_kwargs=self.args.lr_scheduler_kwargs,
            )
            self._created_lr_scheduler = True
        return self.lr_scheduler

    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get the number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can.
        """
        try:
            dataset = dataloader.dataset
            # Special case for IterableDatasetShard, we need to dig deeper
            if isinstance(dataset, IterableDatasetShard):
                return len(dataloader.dataset.dataset)
            return len(dataloader.dataset)
        except (NameError, AttributeError, TypeError):
            # no dataset or no length, estimate by length of dataloader
            return len(dataloader) * self.args.per_device_train_batch_size

    @staticmethod
    def num_tokens(train_dl: DataLoader, max_steps: Optional[int] = None) -> int:
        """
        Helper to get the number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating the dataloader.
        """
        train_tokens = 0
        try:
            for batch in train_dl:
                tokens = batch["input_ids"].numel()
                if max_steps is not None:
                    return tokens * max_steps
                train_tokens += tokens
        except KeyError:
            logger.warning("Cannot get num_tokens from dataloader")
        return train_tokens

    def _hp_search_setup(self, trial: Union["optuna.Trial", dict[str, Any]]):
        """HP search setup code"""
        self._trial = trial

        if self.hp_search_backend is None or trial is None:
            return
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            params = self.hp_space(trial)
        elif self.hp_search_backend == HPSearchBackend.RAY:
            params = trial
            params.pop("wandb", None)
        elif self.hp_search_backend == HPSearchBackend.SIGOPT:
            params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
        elif self.hp_search_backend == HPSearchBackend.WANDB:
            params = trial

        for key, value in params.items():
            if not hasattr(self.args, key):
                logger.warning(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
                    " `TrainingArguments`."
                )
                continue
            old_attr = getattr(self.args, key, None)
            # Cast the value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)

            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info(f"Trial: {trial.params}")
        if self.hp_search_backend == HPSearchBackend.SIGOPT:
            logger.info(f"SigOpt Assignments: {trial.assignments}")
        if self.hp_search_backend == HPSearchBackend.WANDB:
            logger.info(f"W&B Sweep parameters: {trial}")
        if self.is_deepspeed_enabled:
            if self.args.deepspeed is None:
                raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set")

            self.accelerator.free_memory()
            # Rebuild the deepspeed config to reflect the updated training parameters
            from accelerate.utils import DeepSpeedPlugin

            from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig

            self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
            self.args.hf_deepspeed_config.trainer_config_process(self.args)
            self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config)

            # From 1.0 on, we need to fully wipe the DS plugin when doing sweeps.
            # Simply calling `_reset_state` is enough and doesn't need a version pin.
            AcceleratorState()._reset_state()

        self.create_accelerator_and_postprocess()

    def _report_to_hp_search(self, trial: Union["optuna.Trial", dict[str, Any]], step: int, metrics: dict[str, float]):
        if self.hp_search_backend is None or trial is None:
            return
        metrics = metrics.copy()
        self.objective = self.compute_objective(metrics)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            import optuna

            if hasattr(trial, "study") and not trial.study._is_multi_objective():
                trial.report(self.objective, step)
                if trial.should_prune():
                    self.callback_handler.on_train_end(self.args, self.state, self.control)
                    raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
            import ray.train

            with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
                checkpoint = None
                if self.control.should_save:
                    self._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir)
                    checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir)
                metrics["objective"] = self.objective
                ray.train.report(metrics, checkpoint=checkpoint)

    def _tune_save_checkpoint(self, checkpoint_dir: str):
        output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
        self.save_model(output_dir, _internal_call=True)
        if self.args.should_save:
            # Update the `TrainerControl` state to where we are currently
            self.state.stateful_callbacks["TrainerControl"] = self.control.state()
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))

    def call_model_init(self, trial=None):
        model_init_argcount = number_of_arguments(self.model_init)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")

        return model

    def torch_jit_model_eval(self, model, dataloader, training=False):
        if not training:
            if dataloader is None:
                logger.warning("failed to use PyTorch JIT mode because the current dataloader is None.")
                return model
            example_batch = next(iter(dataloader))
            example_batch = self._prepare_inputs(example_batch)
            try:
                jit_model = copy.copy(model)
                jit_model.eval()
                original_forward = jit_model.__dict__.pop("_original_forward", None)
                # remove mixed precision hooks from the model
                if original_forward:
                    jit_model.forward = original_forward
                autocast_handler = AutocastKwargs(cache_enabled=False)
                with self.accelerator.autocast(autocast_handler=autocast_handler), torch.no_grad():
                    if isinstance(example_batch, dict):
                        jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
                    else:
                        jit_model = torch.jit.trace(
                            jit_model,
                            example_kwarg_inputs={key: example_batch[key] for key in example_batch},
                            strict=False,
                        )
                jit_model = torch.jit.freeze(jit_model)
                with torch.no_grad():
                    jit_model(**example_batch)
                    jit_model(**example_batch)
                model = jit_model
                self.use_cpu_amp = False
            except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
                logger.warning(f"failed to use PyTorch JIT mode due to: {e}.")

        return model

    def compare_trainer_and_checkpoint_args(self, training_args, trainer_state):
        attributes_map = {
            "logging_steps": "logging_steps",
            "eval_steps": "eval_steps",
            "save_steps": "save_steps",
        }

        has_warning = False
        warning_str = "Warning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: "
        for arg_attr, state_attr in attributes_map.items():
            arg_value = getattr(training_args, arg_attr, None)
            state_value = getattr(trainer_state, state_attr, None)

            if arg_value is not None and state_value is not None and arg_value != state_value:
                warning_str += f"\n\t{arg_attr}: {arg_value} (from args) != {state_value} (from trainer_state.json)"
                has_warning = True

        # train bs is special as we need to account for multi-GPU
        train_bs_args = training_args.per_device_train_batch_size
        train_bs_state = trainer_state.train_batch_size // max(1, training_args.n_gpu)

        if train_bs_args != train_bs_state:
            warning_str += f"\n\tper_device_train_batch_size: {train_bs_args} (from args) != {train_bs_state} (from trainer_state.json)"
            has_warning = True

        if has_warning:
            logger.warning_once(warning_str)

    def _wrap_model(self, model, training=True, dataloader=None):
        if is_sagemaker_mp_enabled():
            # Wrapping the base model twice in a DistributedModel will raise an error.
            if isinstance(self.model_wrapped, smp.model.DistributedModel):
                return self.model_wrapped
            return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)

        # train/eval could be run multiple times - if already wrapped, don't re-wrap it again
        if self.accelerator.unwrap_model(model, keep_torch_compile=False) is not model:
            return model

        # Mixed precision training with apex
        if self.use_apex and training:
            from apex import amp

            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)

        # Multi-gpu training (should be after apex fp16 initialization); 8-bit models do not support DDP
        if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False):
            model = nn.DataParallel(model)

        if self.args.jit_mode_eval:
            start_time = time.time()
            model = self.torch_jit_model_eval(model, dataloader, training)
            self.jit_compilation_time = round(time.time() - start_time, 4)

        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyway.
        if not training:
            return model

        # Distributed training (should be after apex fp16 initialization)
        # Distributed training using PyTorch FSDP
        if self.is_fsdp_xla_enabled:
            try:
                from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
                from torch_xla.distributed.fsdp import checkpoint_module
                from torch_xla.distributed.fsdp.wrap import (
                    size_based_auto_wrap_policy,
                    transformer_auto_wrap_policy,
                )

                if self.is_fsdp_xla_v2_enabled:
                    from torch_xla.experimental.spmd_fully_sharded_data_parallel import (
                        SpmdFullyShardedDataParallel as FSDPv2,
                    )
            except ImportError:
                raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
            auto_wrap_policy = None
            auto_wrapper_callable = None
            default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None)
            fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get(
                "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
            )

            if self.args.fsdp_config["min_num_params"] > 0:
                auto_wrap_policy = functools.partial(
                    size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["min_num_params"]
                )
            elif fsdp_transformer_layer_cls_to_wrap is not None:
                transformer_cls_to_wrap = set()
                for layer_class in fsdp_transformer_layer_cls_to_wrap:
                    transformer_cls = get_module_class_from_name(model, layer_class)
                    if transformer_cls is None:
                        raise ValueError("Could not find the transformer layer class to wrap in the model.")
                    else:
                        transformer_cls_to_wrap.add(transformer_cls)

                auto_wrap_policy = functools.partial(
                    transformer_auto_wrap_policy,
                    # Transformer layer class to wrap
                    transformer_layer_cls=transformer_cls_to_wrap,
                )
            fsdp_kwargs = self.args.xla_fsdp_config
            if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
                if model.config.use_cache:
                    logger.warning_once(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
                    )
                    model.config.use_cache = False

                # Apply gradient checkpointing to auto-wrapped sub-modules if specified
                def auto_wrapper_callable(m, *args, **kwargs):
                    target_cls = FSDP if not self.is_fsdp_xla_v2_enabled else FSDPv2
                    return target_cls(checkpoint_module(m), *args, **kwargs)

            # Wrap the base model with an outer FSDP wrapper
            if self.is_fsdp_xla_v2_enabled:

                def shard_output(output, mesh):
                    from .modeling_outputs import CausalLMOutputWithPast

                    real_output = None
                    if isinstance(output, torch.Tensor):
                        real_output = output
                    elif isinstance(output, tuple):
                        real_output = output[0]
                    elif isinstance(output, CausalLMOutputWithPast):
                        real_output = output.logits

                    if real_output is None:
                        raise ValueError("Something went wrong, the output of the model shouldn't be `None`")
                    xs.mark_sharding(real_output, mesh, ("fsdp", None, None))

                self.model = model = FSDPv2(
                    model,
                    shard_output=shard_output,
                    auto_wrap_policy=auto_wrap_policy,
                    auto_wrapper_callable=auto_wrapper_callable,
                )
            else:
                self.model = model = FSDP(
                    model,
                    auto_wrap_policy=auto_wrap_policy,
                    auto_wrapper_callable=auto_wrapper_callable,
                    **fsdp_kwargs,
                )

            # Patch `xm.optimizer_step` so it does not reduce gradients in this case,
            # as FSDP does not need gradient reduction over sharded parameters.
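            # For context: `xm.optimizer_step` normally all-reduces gradients across XLA replicas before stepping;
            # with FSDP, gradients are already reduced across shards, so the optimizer can step directly.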
            def patched_optimizer_step(optimizer, barrier=False, optimizer_args=None):
                # Avoid a mutable default argument; `optimizer_args` is only read here.
                optimizer_args = optimizer_args if optimizer_args is not None else {}
                loss = optimizer.step(**optimizer_args)
                if barrier:
                    xm.mark_step()
                return loss

            xm.optimizer_step = patched_optimizer_step
        elif is_sagemaker_dp_enabled():
            model = nn.parallel.DistributedDataParallel(
                model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
            )
        elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
            if is_torch_neuroncore_available():
                return model
            kwargs = {}
            if self.args.ddp_find_unused_parameters is not None:
                kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
            else:
                kwargs["find_unused_parameters"] = True

            if self.args.ddp_bucket_cap_mb is not None:
                kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb

            if self.args.ddp_broadcast_buffers is not None:
                kwargs["broadcast_buffers"] = self.args.ddp_broadcast_buffers

            self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs)

        return model

    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union["optuna.Trial", dict[str, Any], None] = None,
        ignore_keys_for_eval: Optional[list[str]] = None,
        **kwargs,
    ):
        """
        Main training entry point.

        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous
                instance of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states
                loaded here.
            trial (`optuna.Trial` or `dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`list[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs (`dict[str, Any]`, *optional*):
                Additional keyword arguments used to hide deprecated arguments
        """
        if resume_from_checkpoint is False:
            resume_from_checkpoint = None

        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        args = self.args

        self.is_in_train = True

        # If the model uses a tokenizer, it may have new tokens for fine-tuning purposes.
        if isinstance(self.processing_class, (PreTrainedTokenizerBase, ProcessorMixin)) and hasattr(
            self.model, "config"
        ):
            self._align_special_tokens()

        # Attach NEFTune hooks if necessary
        if self.neftune_noise_alpha is not None:
            self.model = self._activate_neftune(self.model)

        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if (
            (args.fp16_full_eval or args.bf16_full_eval)
            and not args.do_train
            and not self.is_model_parallel
            and self.model_init is None
        ):
            self._move_model_to_device(self.model, args.device)

        if "model_path" in kwargs:
            resume_from_checkpoint = kwargs.pop("model_path")
            warnings.warn(
                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
                "instead.",
                FutureWarning,
            )
        if len(kwargs) > 0:
            raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        # This might change the seed so needs to run first.
self._hp_search_setup(trial) self._train_batch_size = self.args.train_batch_size # Model re-init model_reloaded = False if self.model_init is not None: # Seed must be set before instantiating the model when using model_init. enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.model = self.call_model_init(trial) model_reloaded = True # Reinitializes optimizer and scheduler self.optimizer, self.lr_scheduler = None, None # Load potential model checkpoint if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: resume_from_checkpoint = get_last_checkpoint(args.output_dir) if resume_from_checkpoint is None: raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})") if resume_from_checkpoint is not None: if not is_sagemaker_mp_enabled() and not self.is_deepspeed_enabled and not self.is_fsdp_enabled: self._load_from_checkpoint(resume_from_checkpoint) # In case of repeating the find_executable_batch_size, set `self._train_batch_size` properly state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) if state.train_batch_size is not None: self._train_batch_size = state.train_batch_size # If model was re-initialized, put it on the right device and update self.model_wrapped if model_reloaded: if self.place_model_on_device: self._move_model_to_device(self.model, args.device) self.model_wrapped = self.model inner_training_loop = find_executable_batch_size( self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size ) if args.push_to_hub: try: # Disable progress bars when uploading models during checkpoints to avoid polluting stdout hf_hub_utils.disable_progress_bars() return inner_training_loop( args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval, ) finally: hf_hub_utils.enable_progress_bars() else: return inner_training_loop( args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval, ) def get_tp_size(self) -> int: """Get the tensor parallel size from either the model or DeepSpeed config.""" # 1. Check model.tp_size first if (model_tp := getattr(self.model, "_tp_size", None)) is not None: return model_tp # 2. Fall back to DeepSpeed config if enabled if self.is_deepspeed_enabled and (deepspeed_config := getattr(self.args, "hf_deepspeed_config", None)): return deepspeed_config.config.get("tensor_parallel", {}).get("autotp_size", 1) # 3. Default fallback return 1 def get_total_train_batch_size(self, args) -> int: """Calculates total batch size (micro_batch * grad_accum * dp_world_size). 
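
        For example, with a per-device batch size of 8, 4 gradient accumulation steps, a world size of 16 and a
        tp_size of 2, the total is 8 * 4 * (16 // 2) = 256.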
        Note: Only considers DP and TP (dp_world_size = world_size // tp_size)."""
        dp_world_size = args.world_size // self.get_tp_size()
        return self._train_batch_size * args.gradient_accumulation_steps * dp_world_size

    def _inner_training_loop(
        self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
    ):
        self.accelerator.free_memory()
        self._train_batch_size = batch_size
        if self.args.auto_find_batch_size:
            if self.state.train_batch_size != self._train_batch_size:
                from accelerate.utils import release_memory

                (self.model_wrapped,) = release_memory(self.model_wrapped)
                self.model_wrapped = self.model

                # Check for DeepSpeed *after* the initial pass and modify the config
                if self.is_deepspeed_enabled:
                    # Temporarily override `self.args.per_device_train_batch_size` while rebuilding
                    # the DeepSpeed config, then restore the original value.
                    original_bs = self.args.per_device_train_batch_size
                    self.args.per_device_train_batch_size = self._train_batch_size // max(1, self.args.n_gpu)
                    self.propagate_args_to_deepspeed(True)
                    self.args.per_device_train_batch_size = original_bs
            self.state.train_batch_size = self._train_batch_size
        logger.debug(f"Currently training with a batch size of: {self._train_batch_size}")
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        if self.is_fsdp_xla_v2_enabled:
            train_dataloader = tpu_spmd_dataloader(train_dataloader)

        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = self.get_total_train_batch_size(args)

        (
            num_train_epochs,
            num_update_steps_per_epoch,
            num_examples,
            num_train_samples,
            epoch_based,
            len_dataloader,
            max_steps,
        ) = self.set_initial_training_values(args, train_dataloader, total_train_batch_size)

        num_train_tokens = None
        if self.args.include_tokens_per_second:
            num_train_tokens = self.num_tokens(train_dataloader, None if epoch_based else max_steps)
            # If going by epochs, multiply tokens linearly
            if len_dataloader is not None and epoch_based:
                num_train_tokens *= args.num_train_epochs
            # Otherwise, since it's step-based, we just multiply by the gradient accumulation steps
            else:
                num_train_tokens *= args.gradient_accumulation_steps

        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model; the new variables and module references
                # registered by the debug hooks no longer work on the other GPUs, breaking the debug module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
                    " (torchrun or torch.distributed.launch (deprecated))."
                )
            else:
                debug_overflow = DebugUnderflowOverflow(self.model)  # noqa

        delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled

        # Can't delay optimizer creation when using FSDP2: https://github.com/huggingface/accelerate/blob/3f636d626063ffcf9a337c7d3624d61b7d187d59/src/accelerate/accelerator.py#L1404
        is_fsdp2 = self.is_fsdp_enabled and (getattr(self.accelerator.state.fsdp_plugin, "fsdp_version", 1) == 2)
        if is_fsdp2:
            delay_optimizer_creation = False

        # We need to reset the scheduler, as its parameters may be different on subsequent calls
        if self._created_lr_scheduler:
            self.lr_scheduler = None
            self._created_lr_scheduler = False

        if self.is_deepspeed_enabled:
            self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps)

        if not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        self.state = TrainerState(
            stateful_callbacks=[
                cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
            ]
        )
        self.state.is_hyper_param_search = trial is not None
        self.state.train_batch_size = self._train_batch_size

        # Compute absolute values for logging, eval, and save if given as ratio
        self.state.compute_steps(args, max_steps)

        # Activate gradient checkpointing if needed
        if args.gradient_checkpointing:
            self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs)

        model = self._wrap_model(self.model_wrapped)

        # As the model is wrapped, don't use `accelerator.prepare`;
        # this is for unhandled cases such as
        # FSDP-XLA, SageMaker MP/DP, DataParallel, IPEX
        use_accelerator_prepare = model is self.model

        if use_accelerator_prepare and self.is_fsdp_enabled:
            # In case of auto_find_batch_size=True, remove FSDP wrapping from sub-models.
            self.model = unwrap_model(self.model, recursive=True)

        if delay_optimizer_creation:
            if use_accelerator_prepare:
                # Configure the FSDP plugin for QLoRA, if applicable
                self._fsdp_qlora_plugin_updates()
                if self.accelerator.mixed_precision != "fp8":
                    self.model = self.accelerator.prepare(self.model)
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        # prepare using `accelerator` prepare
        if use_accelerator_prepare:
            self.model.train()
            if hasattr(self.lr_scheduler, "step"):
                if self.use_apex:
                    model = self.accelerator.prepare(self.model)
                else:
                    # We avoid letting accelerate prepare the model in the TP case: it is not needed there, since
                    # tensor parallelism is handled by `from_pretrained`, and preparation would otherwise fall
                    # through to DDP-based wrapping.
                    if self.is_tp_enabled:
                        self.optimizer = self.accelerator.prepare(self.optimizer)
                    else:
                        model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)
            else:
                # to handle cases wherein we pass "DummyScheduler" such as when it is specified in DeepSpeed config.
model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( self.model, self.optimizer, self.lr_scheduler ) elif self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: # In this case we are in DDP + LOMO, which should be supported self.optimizer = self.accelerator.prepare(self.optimizer) if self.is_fsdp_enabled: self.model = self.model_wrapped = model # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped # ckpt loading if resume_from_checkpoint is not None: if self.is_deepspeed_enabled: deepspeed_load_checkpoint( self.model_wrapped, resume_from_checkpoint, load_module_strict=not _is_peft_model(self.model) ) elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled: self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped) # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) self._load_scaler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), # FSDP(Transformers Model), Dynamo Optimized Module(Transformers Model) etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples:,}") logger.info(f" Num Epochs = {num_train_epochs:,}") logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}") if self.args.per_device_train_batch_size != self._train_batch_size: logger.info(f" Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps:,}") logger.info(f" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}") self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) self.compare_trainer_and_checkpoint_args(self.args, self.state) self._load_callback_state() epochs_trained = int(self.state.global_step // num_update_steps_per_epoch) if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info(f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first" f" {steps_trained_in_current_epoch} batches in the first epoch." 
                )

        # Update the references
        for attr in ("model", "optimizer", "lr_scheduler"):
            setattr(self.callback_handler, attr, getattr(self, attr))
        self.callback_handler.train_dataloader = train_dataloader

        self.state.init_training_references(self, max_steps, num_train_epochs, trial)

        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0, device=args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()
        grad_norm: Optional[float] = None
        learning_rate = None
        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

        if args.eval_on_start:
            self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True)

        for epoch in range(epochs_trained, num_train_epochs):
            epoch_dataloader = train_dataloader
            if hasattr(epoch_dataloader, "set_epoch"):
                epoch_dataloader.set_epoch(epoch)

            # Reset the past mems state at the beginning of each epoch if necessary.
            if args.past_index >= 0:
                self._past = None

            steps_in_epoch = (
                len(epoch_dataloader)
                if len_dataloader is not None
                else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)

            if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
                self._load_rng_state(resume_from_checkpoint)

            rng_to_sync = False
            steps_skipped = 0
            if steps_trained_in_current_epoch > 0:
                epoch_dataloader = skip_first_batches(epoch_dataloader, steps_trained_in_current_epoch)
                steps_skipped = steps_trained_in_current_epoch
                steps_trained_in_current_epoch = 0
                rng_to_sync = True

            step = -1
            epoch_iterator = iter(epoch_dataloader)
            # We split the epoch iterator into chunks of `gradient_accumulation_steps` batches each
            remainder = steps_in_epoch % args.gradient_accumulation_steps
            if remainder == 0:
                remainder = args.gradient_accumulation_steps
            update_step = -1
            total_updates = steps_in_epoch // args.gradient_accumulation_steps + int(
                remainder < args.gradient_accumulation_steps
            )
            for _ in range(total_updates):
                update_step += 1
                num_batches = args.gradient_accumulation_steps if update_step != (total_updates - 1) else remainder
                batch_samples, num_items_in_batch = self.get_batch_samples(epoch_iterator, num_batches, args.device)

                # Store the number of batches for the current gradient accumulation window.
                # This is used to correctly scale the loss when the last accumulation step has fewer batches.
                self.current_gradient_accumulation_steps = len(batch_samples)
                for i, inputs in enumerate(batch_samples):
                    step += 1
                    do_sync_step = (step + 1) % args.gradient_accumulation_steps == 0 or (step + 1) == steps_in_epoch
                    # Since we perform prefetching, we need to manually set sync_gradients
                    self.accelerator.gradient_state._set_sync_gradients(do_sync_step)

                    if self.args.include_num_input_tokens_seen:
                        main_input_name = getattr(self.model, "main_input_name", "input_ids")
                        if main_input_name not in inputs:
                            logger.warning(
                                "Tried to track the number of tokens seen, however the current model is "
                                "not configured properly to know what item is the input. To fix this, add "
                                "a `main_input_name` attribute to the model class you are using."
) else: input_tokens = inputs[main_input_name].numel() input_tokens = torch.tensor(input_tokens, device=self.args.device, dtype=torch.int64) self.state.num_input_tokens_seen += self.accelerator.gather(input_tokens).sum().item() if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # We explicitly want to avoid relying on `accelerator.accumulate` for generation training context = ( functools.partial(self.accelerator.no_sync, model=model) if i != len(batch_samples) - 1 and self.accelerator.distributed_type != DistributedType.DEEPSPEED else contextlib.nullcontext ) with context(): tr_loss_step = self.training_step(model, inputs, num_items_in_batch) if ( args.logging_nan_inf_filter and not is_torch_xla_available() and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) else: if tr_loss.device != tr_loss_step.device: raise ValueError( f"Calculated loss must be on the original device: {tr_loss.device} but device in use is {tr_loss_step.device}" ) tr_loss = tr_loss + tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) if do_sync_step: # Since we perform prefetching, we need to manually set sync_gradients to True self.accelerator.gradient_state._set_sync_gradients(True) # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0: if is_sagemaker_mp_enabled() and args.fp16: _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm) elif self.use_apex: from apex import amp # Revert to normal clipping otherwise, handling Apex or full precision _grad_norm = nn.utils.clip_grad_norm_( amp.master_params(self.optimizer), args.max_grad_norm, ) else: grad_norm_context = contextlib.nullcontext if self.is_tp_enabled: from torch.distributed._tensor.experimental import implicit_replication grad_norm_context = implicit_replication with grad_norm_context(): _grad_norm = self.accelerator.clip_grad_norm_( model.parameters(), args.max_grad_norm, ) if ( is_accelerate_available() and self.accelerator.distributed_type == DistributedType.DEEPSPEED ): grad_norm = model.get_global_grad_norm() # In some cases the grad norm may not return a float if hasattr(grad_norm, "item"): grad_norm = grad_norm.item() else: grad_norm = _grad_norm self.control = self.callback_handler.on_pre_optimizer_step(args, self.state, self.control) context = contextlib.nullcontext if self.is_tp_enabled: from torch.distributed._tensor.experimental import implicit_replication context = implicit_replication with context(): self.optimizer.step() self.control = self.callback_handler.on_optimizer_step(args, self.state, self.control) # get leaning rate before update learning_rate = self._get_learning_rate() if not self.accelerator.optimizer_step_was_skipped: # Delay optimizer scheduling until metrics are generated if not isinstance(self.lr_scheduler, 
torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate( tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=learning_rate, ) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) # PyTorch/XLA relies on the data loader to insert the mark_step for # each step. Since we are breaking the loop early, we need to manually # insert the mark_step here. if self.control.should_epoch_stop or self.control.should_training_stop: if is_torch_xla_available(): xm.mark_step() break # We also need to break out of the nested loop if self.control.should_epoch_stop or self.control.should_training_stop: if is_torch_xla_available(): xm.mark_step() break if step < 0: logger.warning( "There seems not to be a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate( tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=learning_rate ) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_xla_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sure the model has been saved by process 0. if is_torch_xla_available(): xm.rendezvous("load_best_model_at_end") elif args.parallel_mode == ParallelMode.DISTRIBUTED: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() effective_global_step = max(self.state.global_step, 0.001) # Avoid ZeroDivisionError train_loss = self._total_loss_scalar / effective_global_step metrics = speed_metrics( "train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps, num_tokens=num_train_tokens, ) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save. 
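        # Illustrative sketch (hypothetical paths): with save_total_limit=1 and
        # best_model_checkpoint == "out/checkpoint-500", a final "out/checkpoint-800" that is not the best
        # checkpoint is removed by the loop below, so only the best checkpoint survives training.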
if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1: for checkpoint in checkpoints_sorted: if not os.path.samefile(checkpoint, self.state.best_model_checkpoint): logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) self.control = self.callback_handler.on_train_end(args, self.state, self.control) # Wait for the checkpoint to be uploaded. self._finish_current_push() # After training we make sure to retrieve back the original forward pass method # for the embedding layer by removing the forward post hook. if self.neftune_noise_alpha is not None: self._deactivate_neftune(self.model) return TrainOutput(self.state.global_step, train_loss, metrics) def _get_output_dir(self, trial): if self.hp_search_backend is not None and trial is not None: if self.hp_search_backend == HPSearchBackend.OPTUNA: run_id = trial.number elif self.hp_search_backend == HPSearchBackend.RAY: import ray.train run_id = ray.train.get_context().get_trial_id() elif self.hp_search_backend == HPSearchBackend.SIGOPT: run_id = trial.id elif self.hp_search_backend == HPSearchBackend.WANDB: import wandb run_id = wandb.run.id run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" run_dir = os.path.join(self.args.output_dir, run_name) else: run_dir = self.args.output_dir return run_dir def _load_from_checkpoint(self, resume_from_checkpoint, model=None): if model is None: model = self.model config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME) adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME) adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME) weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and ( # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used any( FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) ) # this checks the FSDP state dict when `FULL_STATE_DICT` is used or os.path.isfile(os.path.join(resume_from_checkpoint, f"{FSDP_MODEL_NAME}.bin")) ) # if multiple adapters exist, they get saved in sub directories adapter_subdirs = ( [ folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) and ( os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_WEIGHTS_NAME)) or os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_SAFE_WEIGHTS_NAME)) ) ] if os.path.isdir(resume_from_checkpoint) else [] ) if is_fsdp_ckpt and not self.is_fsdp_enabled: raise ValueError(f"Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP") if not ( any( os.path.isfile(f) for f in [ weights_file, safe_weights_file, weights_index_file, safe_weights_index_file, adapter_weights_file, adapter_safe_weights_file, ] ) or is_fsdp_ckpt or adapter_subdirs ): raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") logger.info(f"Loading model from {resume_from_checkpoint}.") if os.path.isfile(config_file): config = 
PretrainedConfig.from_json_file(config_file)
            checkpoint_version = config.transformers_version
            if checkpoint_version is not None and checkpoint_version != __version__:
                logger.warning(
                    f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                    f"Transformers but your current version is {__version__}. This is not recommended and could "
                    "lead to errors or unwanted behaviors."
                )

        if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt:
            # If the model is on the GPU, it still works!
            if is_sagemaker_mp_enabled():
                if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")):
                    # If the 'user_content.pt' file exists, load with the new smp api.
                    # Checkpoint must have been saved with the new smp api.
                    smp.resume_from_checkpoint(
                        path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
                    )
                else:
                    # If the 'user_content.pt' file does NOT exist, load with the old smp api.
                    # Checkpoint must have been saved with the old smp api.
                    if hasattr(self.args, "fp16") and self.args.fp16 is True:
                        logger.warning(
                            "Enabling FP16 and loading from an smp < 1.10 checkpoint together is not supported."
                        )
                    check_torch_load_is_safe()
                    state_dict = torch.load(weights_file, map_location="cpu", weights_only=True)
                    # Required for smp to not auto-translate state_dict from hf to smp (is already smp).
                    state_dict["_smp_is_partial"] = False
                    load_result = model.load_state_dict(state_dict, strict=True)
                    # release memory
                    del state_dict
            elif self.is_fsdp_enabled:
                load_fsdp_model(
                    self.accelerator.state.fsdp_plugin,
                    self.accelerator,
                    model,
                    resume_from_checkpoint,
                    **_get_fsdp_ckpt_kwargs(),
                )
            else:
                # We load the model state dict on the CPU to avoid an OOM error.
                if self.args.save_safetensors and os.path.isfile(safe_weights_file):
                    state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu")
                else:
                    check_torch_load_is_safe()
                    state_dict = torch.load(weights_file, map_location="cpu", weights_only=True)

                # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
                # which takes *args instead of **kwargs
                load_result = model.load_state_dict(state_dict, False)
                # release memory
                del state_dict
                self._issue_warnings_after_load(load_result)
        # Load adapters following PR #24096
        elif _is_peft_model(model):
            # If training a model with PEFT & LoRA, assume the adapter has been saved properly.
            # TODO: in the future support only specific min PEFT versions
            if (hasattr(model, "active_adapter") or hasattr(model, "active_adapters")) and hasattr(
                model, "load_adapter"
            ):
                if os.path.exists(resume_from_checkpoint):
                    # For BC for older PEFT versions
                    if hasattr(model, "active_adapters"):
                        active_adapters = model.active_adapters
                        if len(active_adapters) > 1:
                            logger.warning("Multiple active adapters detected, will only consider the first adapter")
                        active_adapter = active_adapters[0]
                    else:
                        active_adapter = model.active_adapter

                    if adapter_subdirs:
                        for subdir_name in adapter_subdirs:
                            peft_id = os.path.join(resume_from_checkpoint, subdir_name)
                            model.load_adapter(peft_id, subdir_name, is_trainable=(subdir_name == active_adapter))
                        model.set_adapter(active_adapter)
                    else:
                        model.load_adapter(resume_from_checkpoint, active_adapter, is_trainable=True)
                else:
                    logger.warning(
                        "The intermediate checkpoints of PEFT may not be saved correctly, "
                        f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. "
                        "Check some examples here: https://github.com/huggingface/peft/issues/96"
                    )
            else:
                logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed")
        else:
            # We load the sharded checkpoint
            load_result = load_sharded_checkpoint(
                model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors
            )
            if not is_sagemaker_mp_enabled():
                self._issue_warnings_after_load(load_result)

    def _load_best_model(self):
        logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
        best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
        best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME)
        best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME)
        best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME)

        model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
        if self.is_deepspeed_enabled:
            deepspeed_load_checkpoint(
                self.model_wrapped,
                self.state.best_model_checkpoint,
                load_module_strict=not _is_peft_model(self.model),
            )
        elif self.is_fsdp_enabled:
            load_result = load_fsdp_model(
                self.accelerator.state.fsdp_plugin,
                self.accelerator,
                model,
                self.state.best_model_checkpoint,
                **_get_fsdp_ckpt_kwargs(),
            )
        elif (
            os.path.exists(best_model_path)
            or os.path.exists(best_safe_model_path)
            or os.path.exists(best_adapter_model_path)
            or os.path.exists(best_safe_adapter_model_path)
        ):
            has_been_loaded = True
            if is_sagemaker_mp_enabled():
                if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
                    # If the 'user_content.pt' file exists, load with the new smp api.
                    # Checkpoint must have been saved with the new smp api.
                    smp.resume_from_checkpoint(
                        path=self.state.best_model_checkpoint,
                        tag=WEIGHTS_NAME,
                        partial=False,
                        load_optimizer=False,
                    )
                else:
                    # If the 'user_content.pt' file does NOT exist, load with the old smp api.
                    # Checkpoint must have been saved with the old smp api.
                    if self.args.save_safetensors and os.path.isfile(best_safe_model_path):
                        state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu")
                    else:
                        check_torch_load_is_safe()
                        state_dict = torch.load(best_model_path, map_location="cpu", weights_only=True)

                    state_dict["_smp_is_partial"] = False
                    load_result = model.load_state_dict(state_dict, strict=True)
            else:
                if _is_peft_model(model):
                    # If training a model with PEFT & LoRA, assume the adapter has been saved properly.
                    # TODO: in the future support only specific min PEFT versions
                    if (hasattr(model, "active_adapter") or hasattr(model, "active_adapters")) and hasattr(
                        model, "load_adapter"
                    ):
                        # For BC for older PEFT versions
                        if hasattr(model, "active_adapters"):
                            active_adapter = model.active_adapters[0]
                            if len(model.active_adapters) > 1:
                                logger.warning("Detected multiple active adapters, will only consider the first one")
                        else:
                            active_adapter = model.active_adapter

                        if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path):
                            try:
                                model.load_adapter(self.state.best_model_checkpoint, active_adapter)
                            except RuntimeError as exc:
                                if model.peft_config[active_adapter].is_prompt_learning:
                                    # for context: https://github.com/huggingface/peft/issues/2256
                                    msg = (
                                        "When using prompt learning PEFT methods such as "
                                        f"{model.peft_config[active_adapter].peft_type.value}, setting "
                                        "load_best_model_at_end=True can lead to errors. It is recommended "
                                        "to set this to False and to load the model manually from the checkpoint "
                                        "directory using PeftModel.from_pretrained(base_model, <path>) after training "
                                        "has finished."
                                    )
                                    raise RuntimeError(msg) from exc
                                else:
                                    raise
                            # load_adapter has no return value, so build an empty _IncompatibleKeys to mirror
                            # load_state_dict; adjust this when appropriate.
                            from torch.nn.modules.module import _IncompatibleKeys

                            load_result = _IncompatibleKeys([], [])
                        else:
                            logger.warning(
                                "The intermediate checkpoints of PEFT may not be saved correctly, "
                                f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. "
                                "Check some examples here: https://github.com/huggingface/peft/issues/96"
                            )
                            has_been_loaded = False
                    else:
                        logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed")
                        has_been_loaded = False
                else:
                    # We load the model state dict on the CPU to avoid an OOM error.
                    if self.args.save_safetensors and os.path.isfile(best_safe_model_path):
                        state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu")
                    else:
                        check_torch_load_is_safe()
                        state_dict = torch.load(best_model_path, map_location="cpu", weights_only=True)

                    # If the model is on the GPU, it still works!
                    # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
                    # which takes *args instead of **kwargs
                    load_result = model.load_state_dict(state_dict, False)
            if not is_sagemaker_mp_enabled() and has_been_loaded:
                self._issue_warnings_after_load(load_result)
        elif os.path.exists(os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_INDEX_NAME)) or os.path.exists(
            os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)
        ):
            load_result = load_sharded_checkpoint(
                model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
            )
            if not is_sagemaker_mp_enabled():
                self._issue_warnings_after_load(load_result)
        else:
            logger.warning(
                f"Could not locate the best model at {best_model_path}. If you are running distributed training "
                "on multiple nodes, you should activate `--save_on_each_node`."
            )

    def _issue_warnings_after_load(self, load_result):
        if len(load_result.missing_keys) != 0:
            if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
                self.model._keys_to_ignore_on_save
            ):
                self.model.tie_weights()
            else:
                logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
        if len(load_result.unexpected_keys) != 0:
            logger.warning(
                f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
) def _evaluate(self, trial, ignore_keys_for_eval, skip_scheduler=False): metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) self._report_to_hp_search(trial, self.state.global_step, metrics) # Run delayed LR scheduler now that metrics are populated if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) and not skip_scheduler: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" try: self.lr_scheduler.step(metrics[metric_to_check]) except KeyError as exc: raise KeyError( f"The `metric_for_best_model` training argument is set to '{metric_to_check}', " f"which is not found in the evaluation metrics. " f"The available evaluation metrics are: {list(metrics.keys())}. " f"Please ensure that the `compute_metrics` function returns a dictionary that includes '{metric_to_check}' or " f"consider changing the `metric_for_best_model` via the TrainingArguments." ) from exc return metrics def _maybe_log_save_evaluate( self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=None ): if self.control.should_log and self.state.global_step > self._globalstep_last_logged: if is_torch_xla_available(): xm.mark_step() logs: dict[str, float] = {} # all_gather + mean() to get average loss over all processes tr_loss_scalar = self._nested_gather(tr_loss).mean().item() # reset tr_loss to zero tr_loss -= tr_loss logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) if grad_norm is not None: logs["grad_norm"] = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm if learning_rate is not None: logs["learning_rate"] = learning_rate else: logs["learning_rate"] = self._get_learning_rate() self._total_loss_scalar += tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.store_flos() self.log(logs, start_time) metrics = None if self.control.should_evaluate: metrics = self._evaluate(trial, ignore_keys_for_eval) is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial) if self.args.save_strategy == SaveStrategy.BEST: self.control.should_save = is_new_best_metric if self.control.should_save: self._save_checkpoint(model, trial) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _load_rng_state(self, checkpoint): # Load RNG states from `checkpoint` if checkpoint is None: return if self.args.world_size > 1: process_index = self.args.process_index rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(checkpoint, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." 
) return with safe_globals(): checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if is_torch_xla_available(): xm.set_rng_state(checkpoint_rng_state["xla"]) is_distributed = self.args.parallel_mode == ParallelMode.DISTRIBUTED if torch.cuda.is_available(): set_rng_state_for_device("CUDA", torch.cuda, checkpoint_rng_state, is_distributed) if is_torch_npu_available(): set_rng_state_for_device("NPU", torch.npu, checkpoint_rng_state, is_distributed) if is_torch_hpu_available(): set_rng_state_for_device("HPU", torch.hpu, checkpoint_rng_state, is_distributed) if is_torch_mlu_available(): set_rng_state_for_device("MLU", torch.mlu, checkpoint_rng_state, is_distributed) if is_torch_musa_available(): set_rng_state_for_device("MUSA", torch.musa, checkpoint_rng_state, is_distributed) def _determine_best_metric(self, metrics, trial): """ Determine if the model should be saved based on the evaluation metrics. Returns: bool: True if a new best metric was found, else False """ is_new_best_metric = False if self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" try: metric_value = metrics[metric_to_check] except KeyError as exc: raise KeyError( f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. " f"The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments." ) from exc operator = np.greater if self.args.greater_is_better else np.less if self.state.best_metric is None: self.state.best_metric = float("-inf") if self.args.greater_is_better else float("inf") if operator(metric_value, self.state.best_metric): self.state.best_metric = metric_value if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH]: self.state.best_global_step = self.state.global_step is_new_best_metric = True return is_new_best_metric def _save_checkpoint(self, model, trial): # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we # want to save except FullyShardedDDP. 
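        # (Under FSDP the outer wrapper differs from self.model, hence the commented-out sanity check below.)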
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"

        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        if self.hp_search_backend is None and trial is None:
            self.store_flos()

        run_dir = self._get_output_dir(trial=trial)
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir, _internal_call=True)

        if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH] and self.state.best_global_step:
            best_checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.best_global_step}"
            best_checkpoint_dir = os.path.join(run_dir, best_checkpoint_folder)

            if os.path.exists(best_checkpoint_dir):
                self.state.best_model_checkpoint = best_checkpoint_dir

        if not self.args.save_only_model:
            # Save optimizer and scheduler
            self._save_optimizer_and_scheduler(output_dir)
            self._save_scaler(output_dir)
            # Save RNG state
            self._save_rng_state(output_dir)

        # Save the Trainer state
        if self.args.should_save:
            # Update `ExportableState` callbacks and `TrainerControl` state to where we are currently
            for cb in [
                cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
            ]:
                cb_name = cb.__class__.__name__
                cb_state = cb.state()
                if isinstance(self.state.stateful_callbacks[cb_name], list):
                    self.state.stateful_callbacks[cb_name].append(cb_state)
                else:
                    self.state.stateful_callbacks[cb_name] = cb_state
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))

        if self.args.push_to_hub:
            self._push_from_checkpoint(output_dir)

        # Maybe delete some older checkpoints.
        if self.args.should_save:
            # we use mtime as default, filesystems without mtime support will be detected in `_sorted_checkpoints`
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)

    def _save_rng_state(self, output_dir):
        # Collect the RNG states to save
        rng_states = {
            "python": random.getstate(),
            "numpy": np.random.get_state(),
            "cpu": torch.random.get_rng_state(),
        }
        if torch.cuda.is_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                # In distributed mode, save the RNG state of every visible CUDA device; otherwise a single state suffices.
                rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
            else:
                rng_states["cuda"] = torch.cuda.random.get_rng_state()

        if is_torch_xla_available():
            rng_states["xla"] = xm.get_rng_state()

        if is_torch_npu_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["npu"] = torch.npu.random.get_rng_state_all()
            else:
                rng_states["npu"] = torch.npu.random.get_rng_state()

        if is_torch_hpu_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["hpu"] = torch.hpu.random.get_rng_state_all()
            else:
                rng_states["hpu"] = torch.hpu.random.get_rng_state()

        if is_torch_mlu_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["mlu"] = torch.mlu.random.get_rng_state_all()
            else:
                rng_states["mlu"] = torch.mlu.random.get_rng_state()

        if is_torch_musa_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["musa"] = torch.musa.get_rng_state_all()
            else:
                rng_states["musa"] = torch.musa.get_rng_state()

        # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
        # not yet exist.
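        # The makedirs call below is therefore defensive. File-naming sketch: a single-process run writes
        # "rng_state.pth", while each rank of a distributed run writes its own "rng_state_{process_index}.pth".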
os.makedirs(output_dir, exist_ok=True) if self.args.world_size <= 1: torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) else: torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) def _save_optimizer_and_scheduler(self, output_dir): if is_torch_xla_available(): xm.rendezvous("saving_optimizer_states") if self.is_fsdp_xla_v1_enabled: optm = { "optimizer": self.optimizer.state_dict(), "shard_metadata": self.model.get_shard_metadata(), } xm.save( optm, os.path.join( output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}" ), master_only=False, ) else: xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) elif is_sagemaker_mp_enabled(): opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) smp.barrier() if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: smp.save( opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME), partial=True, v3=smp.state.cfg.shard_optimizer_state, ) elif self.is_deepspeed_enabled: # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed # config `stage3_gather_16bit_weights_on_model_save` is True accept_exclude_frozen_parameters = "exclude_frozen_parameters" in set( inspect.signature(self.model_wrapped.save_checkpoint).parameters.keys() ) if accept_exclude_frozen_parameters and _is_peft_model(self.model): self.model_wrapped.save_checkpoint(output_dir, exclude_frozen_parameters=True) else: self.model_wrapped.save_checkpoint(output_dir) elif self.is_fsdp_enabled: # save fsdp specific ckpt for resuming from ckpt save_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir, **_get_fsdp_ckpt_kwargs() ) save_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir ) elif self.args.should_save: # deepspeed.save_checkpoint above saves model/optim/sched torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) # Save SCHEDULER & SCALER is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( self.lr_scheduler, DeepSpeedSchedulerWrapper ) if ( self.args.should_save and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) and not is_torch_xla_available() ): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) def _load_optimizer_and_scheduler(self, checkpoint): """If optimizer and scheduler states exist, load them.""" if checkpoint is None: return if self.is_deepspeed_enabled: # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper): with warnings.catch_warnings(record=True) as caught_warnings: check_torch_load_is_safe() self.lr_scheduler.load_state_dict( torch.load(os.path.join(checkpoint, SCHEDULER_NAME), weights_only=True) ) reissue_pt_warnings(caught_warnings) return checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") if is_sagemaker_mp_enabled() else ( os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN)) or ( os.path.isdir(checkpoint) and any( OPTIMIZER_NAME_BIN.split(".")[0] 
in folder_name for folder_name in os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name)) ) ) ) ) checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, f"rank*-of-{self.args.world_size}-{OPTIMIZER_NAME}")) if self.is_fsdp_xla_v1_enabled else checkpoint_file_exists ) if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): # Load in optimizer and scheduler states if is_torch_xla_available(): # On TPU we have to take some extra precautions to properly load the states on the right device. if self.is_fsdp_xla_v1_enabled: check_torch_load_is_safe() optimizer_state = torch.load( os.path.join( checkpoint, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}" ), map_location="cpu", weights_only=True, ) # We only need `optimizer` when resuming from checkpoint optimizer_state = optimizer_state["optimizer"] else: check_torch_load_is_safe() optimizer_state = torch.load( os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu", weights_only=True ) with warnings.catch_warnings(record=True) as caught_warnings: check_torch_load_is_safe() lr_scheduler_state = torch.load( os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu", weights_only=True ) reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): # Optimizer checkpoint was saved with smp >= 1.10 def opt_load_hook(mod, opt): opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) else: # Optimizer checkpoint was saved with smp < 1.10 def opt_load_hook(mod, opt): if IS_SAGEMAKER_MP_POST_1_10: opt.load_state_dict( smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) ) else: opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) self.model_wrapped.register_post_step_hook(opt_load_hook) else: # We use the CPU when training on one GPU to avoid OOM for GPU RAM when training big models. 
                    # In distributed training, however, we load directly on each GPU and accept the risk of GPU OOM,
                    # since loading on CPU is even more likely to OOM (it would hold num_gpu copies of the optimizer state).
                    map_location = self.args.device if self.args.world_size > 1 else "cpu"
                    if self.is_fsdp_enabled:
                        load_fsdp_optimizer(
                            self.accelerator.state.fsdp_plugin,
                            self.accelerator,
                            self.optimizer,
                            self.model,
                            checkpoint,
                            **_get_fsdp_ckpt_kwargs(),
                        )
                    else:
                        check_torch_load_is_safe()
                        self.optimizer.load_state_dict(
                            torch.load(
                                os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location, weights_only=True
                            )
                        )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    self.lr_scheduler.load_state_dict(
                        torch.load(os.path.join(checkpoint, SCHEDULER_NAME), weights_only=True)
                    )
                reissue_pt_warnings(caught_warnings)

    def _save_scaler(self, output_dir):
        # See if there is a scaler attribute
        try:
            scaler = self.accelerator.scaler
        except AttributeError:
            return

        if scaler is None:
            return

        if is_torch_xla_available():
            xm.rendezvous("saving_scaler_state")
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.accelerator.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
            reissue_pt_warnings(caught_warnings)

        # Save SCALER
        if self.args.should_save and not is_torch_xla_available():
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.accelerator.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
            reissue_pt_warnings(caught_warnings)

    def _load_scaler(self, checkpoint):
        """If scaler state exists, load it."""
        if checkpoint is None:
            return

        checkpoint_file_exists = os.path.isfile(os.path.join(checkpoint, SCALER_NAME))

        if checkpoint_file_exists:
            # On TPU we have to take some extra precautions to properly load the states on the right device.
            # Load in scaler states
            if is_torch_xla_available():
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    scaler_state = torch.load(
                        os.path.join(checkpoint, SCALER_NAME), map_location="cpu", weights_only=True
                    )
                reissue_pt_warnings(caught_warnings)
                xm.send_cpu_data_to_device(scaler_state, self.args.device)
                self.accelerator.scaler.load_state_dict(scaler_state)
            else:
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    self.accelerator.scaler.load_state_dict(
                        torch.load(os.path.join(checkpoint, SCALER_NAME), weights_only=True)
                    )
                reissue_pt_warnings(caught_warnings)

    def _load_callback_state(self):
        """If callback states exist and were passed in, restore their states if enabled"""
        if not self.args.restore_callback_states_from_checkpoint:
            return
        # Callback states are stored in stateful_callbacks
        not_found = []
        new_callbacks = []
        original_callbacks = self.callback_handler.callbacks + [self.control]
        for stored_callback, data in self.state.stateful_callbacks.items():
            if not isinstance(data, list):
                data = [data]
            if any(callback.__class__.__name__ == stored_callback for callback in original_callbacks):
                # We can load/restore from multiple callbacks of the same type.
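                # For example (hypothetical setup): two EarlyStoppingCallback instances stored in the checkpoint
                # state are zipped one-to-one with the two live instances found below, so each copy is restored
                # with its own saved attributes.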
                duplicates = [
                    callback for callback in original_callbacks if callback.__class__.__name__ == stored_callback
                ]
                for callback, callback_data in zip(duplicates, data):
                    args = callback_data.get("args", {})
                    attributes = callback_data.get("attributes", {})
                    new_callback = type(callback)(**args)
                    for attribute, value in attributes.items():
                        setattr(new_callback, attribute, value)
                    if isinstance(callback, TrainerControl):
                        # Specifically for restoring the `control` state
                        self.control = new_callback
                    else:
                        new_callbacks.append(new_callback)
                    # We remove the existing callback and add it to the list of new callbacks
                    self.callback_handler.remove_callback(type(new_callback))
                logger.info("Continuing training from checkpoint, restoring any callbacks that were passed in")
            else:
                not_found.append(stored_callback)
        if len(not_found) > 0:
            logger.warning(
                f"Checkpoint included callbacks that are not present in the current configuration. Ignoring them. ({', '.join(not_found)})"
            )
        for callback in new_callbacks:
            self.callback_handler.add_callback(callback)

    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], dict[str, float]]] = None,
        compute_objective: Optional[Callable[[dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: Union[str, list[str]] = "minimize",
        backend: Optional[Union[str, HPSearchBackend]] = None,
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
        **kwargs,
    ) -> Union[BestRun, list[BestRun]]:
        """
        Launch a hyperparameter search using `optuna`, `Ray Tune` or `SigOpt`. The optimized quantity is determined
        by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
        and the sum of all metrics otherwise.

        <Tip warning={true}>

        To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
        reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
        subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
        optimizer/scheduler.

        </Tip>

        Args:
            hp_space (`Callable[["optuna.Trial"], dict[str, float]]`, *optional*):
                A function that defines the hyperparameter search space. Will default to
                [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
                [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
            compute_objective (`Callable[[dict[str, float]], float]`, *optional*):
                A function computing the objective to minimize or maximize from the metrics returned by the
                `evaluate` method. Will default to [`~trainer_utils.default_compute_objective`].
            n_trials (`int`, *optional*, defaults to 20):
                The number of trial runs to test.
            direction (`str` or `list[str]`, *optional*, defaults to `"minimize"`):
                For single-objective optimization, `direction` is a `str` and can be `"minimize"` or `"maximize"`:
                pick `"minimize"` when optimizing the validation loss and `"maximize"` when optimizing one or several
                metrics. For multi-objective optimization, `direction` is a `list[str]` containing `"minimize"` and
                `"maximize"` values, chosen by the same rule.
            backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
                on which one is installed. If all are installed, will default to optuna.
            hp_name (`Callable[["optuna.Trial"], str]`, *optional*):
                A function that defines the trial/run name.
Will default to None. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments for each backend: - `optuna`: parameters from [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html) and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run). If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available). If `progress_reporter` is not set in the `kwargs`, [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used. - `sigopt`: the parameter `proxies` from [sigopt.Connection.set_proxies](https://docs.sigopt.com/support/faq#how-do-i-use-sigopt-with-a-proxy). Returns: [`trainer_utils.BestRun` or `list[trainer_utils.BestRun]`]: All the information about the best run or best runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray backend. """ if backend is None: backend = default_hp_search_backend() backend = HPSearchBackend(backend) backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]() backend_obj.ensure_available() self.hp_search_backend = backend if self.model_init is None: raise RuntimeError( "To use hyperparameter search, you need to pass your model through a model_init function." ) self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective best_run = backend_obj.run(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None: """ Log `logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (`dict[str, float]`): The values to log. start_time (`Optional[float]`): The start of training. """ if self.state.epoch is not None: logs["epoch"] = self.state.epoch if self.args.include_num_input_tokens_seen: logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen if start_time is not None: logs.update(speed_metrics("train", start_time, num_tokens=self.state.num_input_tokens_seen)) output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: """ Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. """ if isinstance(data, Mapping): return type(data)({k: self._prepare_input(v) for k, v in data.items()}) elif isinstance(data, (tuple, list)): return type(data)(self._prepare_input(v) for v in data) elif isinstance(data, torch.Tensor): kwargs = {"device": self.args.device} if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)): # NLP models inputs are int/uint and those get adjusted to the right dtype of the # embedding. 
Other models such as wav2vec2's inputs are already float and thus # may need special handling to match the dtypes of the model kwargs.update({"dtype": self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()}) return data.to(**kwargs) return data def _prepare_inputs(self, inputs: dict[str, Union[torch.Tensor, Any]]) -> dict[str, Union[torch.Tensor, Any]]: """ Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ inputs = self._prepare_input(inputs) if len(inputs) == 0: raise ValueError( "The batch received was empty, your model won't be able to train on it. Double-check that your " f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." ) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past return inputs def compute_loss_context_manager(self): """ A helper wrapper to group together context managers. """ ctx_stack = contextlib.ExitStack() autocast_ctx = self.autocast_smart_context_manager() if not isinstance(autocast_ctx, contextlib.nullcontext): ctx_stack.enter_context(autocast_ctx) return ctx_stack def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): """ A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired arguments, depending on the situation. """ if self.use_cpu_amp: # TODO Matt: This syntax is deprecated and the preferred version is # torch.amp.autocast("cpu", cache_enabled=cache_enabled, dtype=self.amp_dtype) # but this is unavailable on Torch 2.1 or earlier. We can change this when we stop supporting 2.1. ctx_manager = torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) else: ctx_manager = contextlib.nullcontext() return ctx_manager def training_step( self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to train. inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. Return: `torch.Tensor`: The tensor with training loss on this batch. """ model.train() if hasattr(self.optimizer, "train") and callable(self.optimizer.train): self.optimizer.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch) del inputs if ( self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0 ): if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_mlu_available(): torch.mlu.empty_cache() elif is_torch_musa_available(): torch.musa.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() elif is_torch_mps_available(): torch.mps.empty_cache() elif is_torch_hpu_available(): logger.warning( "`torch_empty_cache_steps` is set but HPU device/backend does not support empty_cache()." 
                )
            else:
                torch.cuda.empty_cache()

        kwargs = {}

        # For LOMO optimizers you need to explicitly use the learning rate
        if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            kwargs["learning_rate"] = self._get_learning_rate()

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.use_apex:
            from apex import amp

            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            # Finally, normalize the loss for reporting when the gradient accumulation scaling could not be applied
            # inside compute_loss
            if (not self.model_accepts_loss_kwargs or num_items_in_batch is None) and self.compute_loss_func is None:
                # If the model does not accept loss kwargs, we need to normalize the loss by the number of gradient accumulation steps
                loss = loss / self.current_gradient_accumulation_steps

            # Turning off loss scaling w.r.t. gradient accumulation when DeepSpeed is enabled
            # https://github.com/huggingface/transformers/pull/35808
            if self.accelerator.distributed_type == DistributedType.DEEPSPEED:
                kwargs["scale_wrt_gas"] = False

            self.accelerator.backward(loss, **kwargs)

        return loss.detach()

    def compute_loss(
        self,
        model: nn.Module,
        inputs: dict[str, Union[torch.Tensor, Any]],
        return_outputs: bool = False,
        num_items_in_batch: Optional[torch.Tensor] = None,
    ):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Args:
            model (`nn.Module`):
                The model to compute the loss for.
            inputs (`dict[str, Union[torch.Tensor, Any]]`):
                The input data for the model.
            return_outputs (`bool`, *optional*, defaults to `False`):
                Whether to return the model outputs along with the loss.
            num_items_in_batch (`Optional[torch.Tensor]`, *optional*):
                The number of items in the batch. If num_items_in_batch is not passed, the loss is instead normalized
                by the number of gradient accumulation steps in `training_step`.

        Returns:
            The loss of the model along with its output if return_outputs was set to True.

        Subclass and override for custom behavior. If you are not using `num_items_in_batch` when computing your loss,
        make sure to overwrite `self.model_accepts_loss_kwargs` to `False`. Otherwise, the loss calculation might be
        slightly inaccurate when performing gradient accumulation.
        """
        if (self.label_smoother is not None or self.compute_loss_func is not None) and "labels" in inputs:
            labels = inputs.pop("labels")
        else:
            labels = None
        if self.model_accepts_loss_kwargs:
            kwargs = {}
            if num_items_in_batch is not None:
                kwargs["num_items_in_batch"] = num_items_in_batch
            inputs = {**inputs, **kwargs}
        outputs = model(**inputs)
        # Save past state if it exists
        # TODO: this needs to be fixed and made cleaner later.
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]

        if labels is not None:
            unwrapped_model = self.accelerator.unwrap_model(model)
            if _is_peft_model(unwrapped_model):
                model_name = unwrapped_model.base_model.model._get_name()
            else:
                model_name = unwrapped_model._get_name()
            # User-defined compute_loss function
            if self.compute_loss_func is not None:
                loss = self.compute_loss_func(outputs, labels, num_items_in_batch=num_items_in_batch)
            elif model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
                loss = self.label_smoother(outputs, labels, shift_labels=True)
            else:
                loss = self.label_smoother(outputs, labels)
        else:
            if isinstance(outputs, dict) and "loss" not in outputs:
                raise ValueError(
                    "The model did not return a loss from the inputs, only the following keys: "
                    f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
) # We don't use .loss here since the model may return tuples instead of ModelOutput. loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] if ( self.args.average_tokens_across_devices and (self.model_accepts_loss_kwargs or self.compute_loss_func) and num_items_in_batch is not None ): loss *= self.accelerator.num_processes return (loss, outputs) if return_outputs else loss def is_local_process_zero(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. """ return self.args.local_process_index == 0 def is_world_process_zero(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). """ # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global # process index. if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.args.process_index == 0 def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): """ Will save the model, so you can reload it using `from_pretrained()`. Will only save from the main process. """ if output_dir is None: output_dir = self.args.output_dir if is_torch_xla_available(): self._save_tpu(output_dir) elif is_sagemaker_mp_enabled(): # Calling the state_dict needs to be done on the wrapped model and on all processes. os.makedirs(output_dir, exist_ok=True) state_dict = self.model_wrapped.state_dict() if self.args.should_save: self._save(output_dir, state_dict=state_dict) if IS_SAGEMAKER_MP_POST_1_10: # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 Path(os.path.join(output_dir, "user_content.pt")).touch() # We are in N-D parallelism if we have parallelism_config set, so we check accelerate if we're on a to_save rank elif getattr(self.accelerator, "parallelism_config", None) is not None: if self.accelerator.should_save_model: self._save(output_dir) # If we drop to here, we're in 1D parallelism, so all ranks need to go to `save_pretrained` elif (tp_size := getattr(self.model, "_tp_size", 0)) is not None and tp_size > 1: self._save(output_dir) elif self.is_fsdp_enabled: if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and ( version.parse(accelerate_version) > version.parse("0.24.1") ): state_dict = self.accelerator.get_state_dict(self.model) if self.args.should_save: self._save(output_dir, state_dict=state_dict) elif self.is_deepspeed_enabled: try: state_dict = self.accelerator.get_state_dict(self.deepspeed) if self.args.should_save: self._save(output_dir, state_dict=state_dict) except ValueError: logger.warning( " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use" " zero_to_fp32.py to recover weights" ) if self.args.should_save: self._save(output_dir, state_dict={}) # remove the dummy state_dict remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) self.model_wrapped.save_checkpoint(output_dir) elif self.args.should_save: self._save(output_dir) # Push to the Hub when `save_model` is called by the user. 
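        # (Checkpoint saves pass _internal_call=True and are pushed separately via _push_from_checkpoint.)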
if self.args.push_to_hub and not _internal_call: self.push_to_hub(commit_message="Model save", revision=self.args.hub_revision) def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info(f"Saving model checkpoint to {output_dir}") model = self.model xm.mark_step() if xm.is_master_ordinal(local=False): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` supported_classes = (PushToHubMixin,) xm.rendezvous("saving_checkpoint") if self.is_fsdp_xla_v1_enabled: ckpt = { "model": model.state_dict(), "shard_metadata": model.get_shard_metadata(), } ckpt_path = os.path.join( output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{WEIGHTS_NAME}" ) # All ranks save sharded checkpoint xm.save(ckpt, ckpt_path, master_only=False) # Make sure all ranks have saved checkpoints xm.rendezvous("save_full_checkpoints") # Master save full checkpoint if self.args.should_save: from torch_xla.distributed.fsdp import consolidate_sharded_model_checkpoints full_state_dict, _ = consolidate_sharded_model_checkpoints( ckpt_prefix=os.path.join(output_dir, ""), ckpt_suffix=f"rank*-of-*-{WEIGHTS_NAME}", save_model=False, ) model = model.module.module unwrapped_model = self.accelerator.unwrap_model(model) if isinstance(unwrapped_model, supported_classes): unwrapped_model.save_pretrained( output_dir, state_dict=full_state_dict, save_function=xm.save, safe_serialization=self.args.save_safetensors, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") xm.save(full_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) elif not isinstance(model, supported_classes): if isinstance(self.accelerator.unwrap_model(model), supported_classes): self.accelerator.unwrap_model(model).save_pretrained( output_dir, is_main_process=self.args.should_save, state_dict=xm._maybe_convert_to_cpu(model.state_dict()), save_function=xm.save, safe_serialization=self.args.save_safetensors, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = xm._maybe_convert_to_cpu(model.state_dict()) xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: model.save_pretrained( output_dir, is_main_process=self.args.should_save, save_function=xm.save, safe_serialization=self.args.save_safetensors, state_dict=xm._maybe_convert_to_cpu(model.state_dict()), ) if self.processing_class is not None and self.args.should_save: self.processing_class.save_pretrained(output_dir) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` if not isinstance(self.model, supported_classes): if state_dict is None: state_dict = self.model.state_dict() if isinstance(self.accelerator.unwrap_model(self.model, keep_torch_compile=False), supported_classes): self.accelerator.unwrap_model(self.model, keep_torch_compile=False).save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") if self.args.save_safetensors: safetensors.torch.save_file( state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"} ) else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) if self.processing_class is not None: self.processing_class.save_pretrained(output_dir) elif ( self.data_collator is not None and hasattr(self.data_collator, "tokenizer") and self.data_collator.tokenizer is not None ): logger.info("Saving Trainer.data_collator.tokenizer by default as Trainer.processing_class is `None`") self.data_collator.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def store_flos(self): # Storing the number of floating-point operations that went into the model if self.args.parallel_mode == ParallelMode.DISTRIBUTED: self.state.total_flos += ( distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() ) self.current_flos = 0 else: self.state.total_flos += self.current_flos self.current_flos = 0 def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False ) -> list[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) # mtime is not reliable on all filesystems, especially on some fuse fs in cloud environments # so we check if the mtime is fake and fallback to numerical ordering if needed if use_mtime and len(ordering_and_checkpoint_path) > 1: mtime_diff = checkpoints_sorted[-1][0] - checkpoints_sorted[0][0] if mtime_diff < 1.0: # less than 1 second, which is almost impossible when mtime works fine warnings.warn("mtime may not be reliable on this filesystem, falling back to numerical ordering") return self._sorted_checkpoints( use_mtime=False, output_dir=output_dir, checkpoint_prefix=checkpoint_prefix ) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. 
if (
            self.state.best_model_checkpoint is not None
            and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted
        ):
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            for i in range(best_model_index, len(checkpoints_sorted) - 2):
                checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
        return checkpoints_sorted

    def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
        if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
            return

        # Check if we should delete older checkpoint(s)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
        if len(checkpoints_sorted) <= self.args.save_total_limit:
            return

        # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
        # we don't do to allow resuming.
        save_total_limit = self.args.save_total_limit
        if (
            self.state.best_model_checkpoint is not None
            and self.args.save_total_limit == 1
            and checkpoints_sorted[-1] != self.state.best_model_checkpoint
        ):
            save_total_limit = 2

        number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
        checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
        for checkpoint in checkpoints_to_be_deleted:
            logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
            shutil.rmtree(checkpoint, ignore_errors=True)

    def evaluate(
        self,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        ignore_keys: Optional[list[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> dict[str, float]:
        """
        Run evaluation and return metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init `compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (Union[`Dataset`, dict[str, `Dataset`]], *optional*):
                Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
                not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will
                evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the
                `__len__` method.

                <Tip>

                If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run
                separate evaluations on each dataset. This can be useful to monitor how training affects other
                datasets or simply to get a more fine-grained evaluation.
                When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one of
                the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets
                `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the loss on
                `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`.

                </Tip>

            ignore_keys (`list[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
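        Example (an illustrative sketch; `trainer`, `wiki_ds` and `books_ds` are hypothetical and assumed to already
        exist):

        ```python
        >>> # Passing a dict runs one evaluation per entry and prefixes each metric with
        >>> # the dataset name, e.g. `eval_wiki_loss` and `eval_books_loss`.
        >>> metrics = trainer.evaluate(eval_dataset={"wiki": wiki_ds, "books": books_ds})
        ```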
""" # handle multiple eval datasets override = eval_dataset is not None eval_dataset = eval_dataset if override else self.eval_dataset if isinstance(eval_dataset, dict): metrics = {} for eval_dataset_name, _eval_dataset in eval_dataset.items(): dataset_metrics = self.evaluate( eval_dataset=_eval_dataset if override else eval_dataset_name, ignore_keys=ignore_keys, metric_key_prefix=f"{metric_key_prefix}_{eval_dataset_name}", ) metrics.update(dataset_metrics) return metrics # memory metrics - must set up as early as possible self._memory_tracker.start() eval_dataloader = self.get_eval_dataloader(eval_dataset) if self.is_fsdp_xla_v2_enabled: eval_dataloader = tpu_spmd_dataloader(eval_dataloader) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if self.compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, ) total_batch_size = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] if f"{metric_key_prefix}_model_preparation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"] output.metrics.update( speed_metrics( metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) ) self.log(output.metrics) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return output.metrics def predict( self, test_dataset: Dataset, ignore_keys: Optional[list[str]] = None, metric_key_prefix: str = "test" ) -> PredictionOutput: """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `evaluate()`. Args: test_dataset (`Dataset`): Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the `model.forward()` method are automatically removed. Has to implement the method `__len__` ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"test"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "test_bleu" if the prefix is "test" (default) <Tip> If your predictions or labels have different sequence length (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. </Tip> Returns: *NamedTuple* A namedtuple with the following keys: - predictions (`np.ndarray`): The predictions on `test_dataset`. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). 
- metrics (`dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained labels). """ # memory metrics - must set up as early as possible self._memory_tracker.start() test_dataloader = self.get_test_dataloader(test_dataset) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop( test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix ) total_batch_size = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] if f"{metric_key_prefix}_model_preparation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"] output.metrics.update( speed_metrics( metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) ) self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics) def evaluation_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[list[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ args = self.args prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only # if eval is called w/o train, handle model prep here if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: start_time = time.time() model = ( self.accelerator.prepare(model) if self.is_deepspeed_enabled or (self.is_fsdp_enabled and self.accelerator.mixed_precision != "fp8" and not self.args.torch_compile) else self.accelerator.prepare_model(model, evaluation_mode=True) ) self.model_preparation_time = round(time.time() - start_time, 4) if self.is_fsdp_enabled: self.model = model # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called # while ``train`` is running, cast it to the right dtype first and then put on device if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = self.args.eval_batch_size logger.info(f"\n***** Running {description} *****") if has_length(dataloader): logger.info(f" Num examples = {self.num_examples(dataloader)}") else: logger.info(" Num examples: Unknown") logger.info(f" Batch size = {batch_size}") if hasattr(model, "eval") and callable(model.eval): model.eval() if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval): self.optimizer.eval() self.callback_handler.eval_dataloader = dataloader # Do this before 
wrapping. eval_dataset = getattr(dataloader, "dataset", None) if args.past_index >= 0: self._past = None # Initialize containers all_losses = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_preds = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_labels = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_inputs = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) metrics = None eval_set_kwargs = {} # Will be useful when we have an iterable dataset so don't know its length. observed_num_examples = 0 # Main evaluation loop for step, inputs in enumerate(dataloader): # Update the observed num examples observed_batch_size = find_batch_size(inputs) if observed_batch_size is not None: observed_num_examples += observed_batch_size # For batch samplers, batch_size is not known by the dataloader in advance. if batch_size is None: batch_size = observed_batch_size # Prediction step losses, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, "main_input_name", "input_ids") inputs_decode = ( self._prepare_input(inputs[main_input_name]) if "inputs" in args.include_for_metrics else None ) if is_torch_xla_available(): xm.mark_step() # Update containers if losses is not None: losses = self.gather_function(losses.repeat(batch_size)) all_losses.add(losses) if inputs_decode is not None: inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100) inputs_decode = self.gather_function(inputs_decode) if not self.args.batch_eval_metrics or description == "Prediction": all_inputs.add(inputs_decode) if labels is not None: # Pad labels here, preparing for preprocess_logits_for_metrics in next logits block. labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100) if logits is not None: logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) logits = self.gather_function(logits) if not self.args.batch_eval_metrics or description == "Prediction": all_preds.add(logits) if labels is not None: labels = self.gather_function(labels) if not self.args.batch_eval_metrics or description == "Prediction": all_labels.add(labels) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) if self.args.batch_eval_metrics: if self.compute_metrics is not None and logits is not None and labels is not None: is_last_step = self.accelerator.gradient_state.end_of_dataloader batch_kwargs = {} batch_kwargs["losses"] = losses if "loss" in args.include_for_metrics else None batch_kwargs["inputs"] = inputs if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics( EvalPrediction(predictions=logits, label_ids=labels, **batch_kwargs), compute_result=is_last_step, ) del losses, logits, labels, inputs torch.cuda.empty_cache() # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
elif args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: all_losses.to_cpu_and_numpy() all_preds.to_cpu_and_numpy() all_labels.to_cpu_and_numpy() all_inputs.to_cpu_and_numpy() del losses, logits, labels, inputs torch.cuda.empty_cache() # After all calls to `.gather_function`, reset to `gather_for_metrics`: self.gather_function = self.accelerator.gather_for_metrics if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU all_losses = all_losses.get_arrays() all_preds = all_preds.get_arrays() all_labels = all_labels.get_arrays() all_inputs = all_inputs.get_arrays() # Number of samples if has_length(eval_dataset): num_samples = len(eval_dataset) # The instance check is weird and does not actually check for the type, but whether the dataset has the right # methods. Therefore we need to make sure it also has the attribute. elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: num_samples = eval_dataset.num_examples else: if has_length(dataloader): num_samples = self.num_examples(dataloader) else: # both len(dataloader.dataset) and len(dataloader) fail num_samples = observed_num_examples if num_samples == 0 and observed_num_examples > 0: num_samples = observed_num_examples # Metrics! if ( self.compute_metrics is not None and all_preds is not None and all_labels is not None and not self.args.batch_eval_metrics ): eval_set_kwargs["losses"] = all_losses if "loss" in args.include_for_metrics else None eval_set_kwargs["inputs"] = all_inputs if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics( EvalPrediction(predictions=all_preds, label_ids=all_labels, **eval_set_kwargs) ) elif metrics is None: metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if isinstance(all_losses, list) and all_losses: metrics[f"{metric_key_prefix}_loss"] = np.concatenate(all_losses).mean().item() elif isinstance(all_losses, np.ndarray): metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() if hasattr(self, "jit_compilation_time"): metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time if hasattr(self, "model_preparation_time"): metrics[f"{metric_key_prefix}_model_preparation_time"] = self.model_preparation_time # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def _nested_gather(self, tensors, name=None): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_xla_available(): if name is None: name = "nested_gather" tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif (self.args.distributed_state is not None and self.args.distributed_state.distributed_type != "NO") or ( self.args.distributed_state is None and self.args.local_rank != -1 ): tensors = distributed_concat(tensors) return tensors def prediction_step( self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]] = None, ) -> 
tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. Return: tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names) # For CLIP-like models capable of returning loss values. # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss` # is `True` in `model.forward`. return_loss = inputs.get("return_loss") if return_loss is None: return_loss = self.can_return_loss loss_without_labels = len(self.label_names) == 0 and return_loss inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", ["past_key_values"]) else: ignore_keys = [] # labels may be popped when computing the loss (label smoothing for instance) so we grab them first. if has_labels or loss_without_labels: labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) if len(labels) == 1: labels = labels[0] else: labels = None with torch.no_grad(): if is_sagemaker_mp_enabled(): raw_outputs = smp_forward_only(model, inputs) if has_labels or loss_without_labels: if isinstance(raw_outputs, dict): loss_mb = raw_outputs["loss"] logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) else: loss_mb = raw_outputs[0] logits_mb = raw_outputs[1:] loss = loss_mb.reduce_mean().detach().cpu() logits = smp_nested_concat(logits_mb) else: loss = None if isinstance(raw_outputs, dict): logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) else: logits_mb = raw_outputs logits = smp_nested_concat(logits_mb) else: if has_labels or loss_without_labels: with self.compute_loss_context_manager(): loss, outputs = self.compute_loss(model, inputs, return_outputs=True) loss = loss.detach().mean() if isinstance(outputs, dict): logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) else: logits = outputs[1:] else: loss = None with self.compute_loss_context_manager(): outputs = model(**inputs) if isinstance(outputs, dict): logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) else: logits = outputs # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index - 1] if prediction_loss_only: return (loss, None, None) logits = nested_detach(logits) if len(logits) == 1: logits = logits[0] return (loss, logits, labels) def floating_point_ops(self, inputs: dict[str, Union[torch.Tensor, Any]]): """ For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point operations for every backward + forward pass. 
If using another model, either implement such a method in the model or subclass and override this method. Args: inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. Returns: `int`: The number of floating-point operations. """ if hasattr(self.model, "floating_point_ops"): return self.model.floating_point_ops(inputs) else: return 0 def init_hf_repo(self, token: Optional[str] = None): """ Initializes a git repo in `self.args.hub_model_id`. """ # Only on process zero if not self.is_world_process_zero(): return if self.args.hub_model_id is None: repo_name = Path(self.args.output_dir).absolute().name else: repo_name = self.args.hub_model_id token = token if token is not None else self.args.hub_token repo_url = create_repo(repo_name, token=token, private=self.args.hub_private_repo, exist_ok=True) self.hub_model_id = repo_url.repo_id self.push_in_progress = None def create_model_card( self, language: Optional[str] = None, license: Optional[str] = None, tags: Union[str, list[str], None] = None, model_name: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Union[str, list[str], None] = None, dataset_tags: Union[str, list[str], None] = None, dataset: Union[str, list[str], None] = None, dataset_args: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: language (`str`, *optional*): The language of the model (if applicable) license (`str`, *optional*): The license of the model. Will default to the license of the pretrained model used, if the original model given to the `Trainer` comes from a repo on the Hub. tags (`str` or `list[str]`, *optional*): Some tags to be included in the metadata of the model card. model_name (`str`, *optional*): The name of the model. finetuned_from (`str`, *optional*): The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the `Trainer` (if it comes from the Hub). tasks (`str` or `list[str]`, *optional*): One or several task identifiers, to be included in the metadata of the model card. dataset_tags (`str` or `list[str]`, *optional*): One or several dataset tags, to be included in the metadata of the model card. dataset (`str` or `list[str]`, *optional*): One or several dataset identifiers, to be included in the metadata of the model card. dataset_args (`str` or `list[str]`, *optional*): One or several dataset arguments, to be included in the metadata of the model card. 
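        Example (an illustrative sketch; the model, tag and dataset names are hypothetical):

        ```python
        >>> trainer.create_model_card(
        ...     language="en",
        ...     license="apache-2.0",
        ...     tags="text-classification",
        ...     model_name="my-finetuned-model",
        ...     finetuned_from="bert-base-uncased",
        ...     tasks="text-classification",
        ...     dataset="imdb",
        ... )
        ```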
""" if not self.is_world_process_zero(): return model_card_filepath = os.path.join(self.args.output_dir, "README.md") is_peft_library = False if os.path.exists(model_card_filepath): library_name = ModelCard.load(model_card_filepath).data.get("library_name") is_peft_library = library_name == "peft" # Append existing tags in `tags` existing_tags = ModelCard.load(model_card_filepath).data.tags if tags is not None and existing_tags is not None: if isinstance(tags, str): tags = [tags] for tag in existing_tags: if tag not in tags: tags.append(tag) training_summary = TrainingSummary.from_trainer( self, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, ) model_card = training_summary.to_model_card() with open(model_card_filepath, "w") as f: f.write(model_card) if is_peft_library: self.accelerator.unwrap_model(self.model).create_or_update_model_card(self.args.output_dir) def _push_from_checkpoint(self, checkpoint_folder): # Only push from one node. if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: return # If we haven't finished the last push, we don't do this one unless args.hub_always_push=True. if not self.args.hub_always_push and self.push_in_progress is not None and not self.push_in_progress.is_done(): return output_dir = self.args.output_dir # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder modeling_files = [CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME] # Add sharded checkpoints if we have an index for index_file in [WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME]: index_path = os.path.join(checkpoint_folder, index_file) if os.path.isfile(index_path): modeling_files.append(index_file) with open(index_path) as f: index = json.loads(f.read()) shard_files = list(set(index["weight_map"].values())) modeling_files.extend(shard_files) if is_peft_available(): modeling_files.extend([ADAPTER_CONFIG_NAME, ADAPTER_WEIGHTS_NAME, ADAPTER_SAFE_WEIGHTS_NAME]) for modeling_file in modeling_files: if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) # Saving the processing class is fast and we don't know how many files it may have spawned, so we resave it to be sure. 
if self.processing_class is not None:
            self.processing_class.save_pretrained(output_dir)
        # Same for the training arguments
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))

        if self.args.save_strategy == SaveStrategy.STEPS:
            commit_message = f"Training in progress, step {self.state.global_step}"
        else:
            commit_message = f"Training in progress, epoch {int(self.state.epoch)}"

        model_push_job = upload_folder(
            repo_id=self.hub_model_id,
            folder_path=output_dir,
            commit_message=commit_message,
            token=self.args.hub_token,
            run_as_future=True,
            ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"],
            revision=self.args.hub_revision,
        )

        push_jobs = [model_push_job]

        if self.args.hub_strategy in [HubStrategy.CHECKPOINT, HubStrategy.ALL_CHECKPOINTS]:
            path_in_repo = (
                "last-checkpoint" if self.args.hub_strategy == HubStrategy.CHECKPOINT else Path(checkpoint_folder).name
            )
            checkpoint_push = upload_folder(
                repo_id=self.hub_model_id,
                folder_path=checkpoint_folder,
                path_in_repo=path_in_repo,
                commit_message=commit_message + ", checkpoint",
                token=self.args.hub_token,
                run_as_future=True,
                revision=self.args.hub_revision,
            )
            push_jobs.append(checkpoint_push)

        if self.push_in_progress is None or self.push_in_progress.is_done():
            self.push_in_progress = PushInProgress(push_jobs)
        else:
            self.push_in_progress.jobs.extend(push_jobs)

    def _finish_current_push(self):
        if not hasattr(self, "push_in_progress"):
            return
        if self.push_in_progress is not None and not self.push_in_progress.is_done():
            logger.info("Waiting for the current checkpoint push to be finished, this might take a couple of minutes.")
            self.push_in_progress.wait_until_done()

    def push_to_hub(
        self,
        commit_message: Optional[str] = "End of training",
        blocking: bool = True,
        token: Optional[str] = None,
        revision: Optional[str] = None,
        **kwargs,
    ) -> str:
        """
        Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`.

        Parameters:
            commit_message (`str`, *optional*, defaults to `"End of training"`):
                Message to commit while pushing.
            blocking (`bool`, *optional*, defaults to `True`):
                Whether the function should return only when the `git push` has finished.
            token (`str`, *optional*, defaults to `None`):
                Token with write permission that overrides the `Trainer`'s original `hub_token` argument.
            revision (`str`, *optional*):
                The git revision to commit from. Defaults to the head of the "main" branch.
            kwargs (`dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to [`~Trainer.create_model_card`].

        Returns:
            The URL of the repository where the model was pushed if `blocking=True`, or a `Future` object tracking the
            progress of the commit if `blocking=False`.
        """
        model_name = kwargs.pop("model_name", None)
        if model_name is None and self.args.should_save:
            if self.args.hub_model_id is None:
                model_name = Path(self.args.output_dir).name
            else:
                model_name = self.args.hub_model_id.split("/")[-1]

        token = token if token is not None else self.args.hub_token

        # In case the user calls this method with args.push_to_hub = False
        if self.hub_model_id is None:
            self.init_hf_repo(token=token)

        # Needs to be executed on all processes for TPU training, but will only save on the process determined by
        # self.args.should_save.
        self.save_model(_internal_call=True)

        # Only push from one node.
        if not self.is_world_process_zero():
            return

        # Add additional tags in case the model already has some tags and the user passes a "tags" argument to
        # `push_to_hub`, so that the trainer automatically handles internal tags from all models, since `Trainer`
        # does not call `model.push_to_hub`.
if getattr(self.model, "model_tags", None) is not None: if "tags" not in kwargs: kwargs["tags"] = [] # If it is a string, convert it to a list if isinstance(kwargs["tags"], str): kwargs["tags"] = [kwargs["tags"]] for model_tag in self.model.model_tags: if model_tag not in kwargs["tags"]: kwargs["tags"].append(model_tag) self.create_model_card(model_name=model_name, **kwargs) if revision is None: revision = self.args.hub_revision # Wait for the current upload to be finished. self._finish_current_push() return upload_folder( repo_id=self.hub_model_id, folder_path=self.args.output_dir, commit_message=commit_message, token=token, run_as_future=not blocking, ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], revision=revision, ) # # Deprecated code # def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[list[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ args = self.args if not has_length(dataloader): raise ValueError("dataloader must implement a working __len__") prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only # if eval is called w/o train, handle model prep here if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: model = ( self.accelerator.prepare(model) if self.is_deepspeed_enabled or self.is_fsdp_enabled else self.accelerator.prepare_model(model, evaluation_mode=True) ) if self.is_fsdp_enabled: self.model = model # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called # while ``train`` is running, cast it to the right dtype first and then put on device if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = ( dataloader.total_batch_size if getattr(dataloader, "_is_accelerate_prepared", False) else dataloader.batch_size ) if batch_size is None: raise ValueError( "Batch size cannot be None. Ensure the dataloader has a valid batch_size or total_batch_size." 
) num_examples = self.num_examples(dataloader) logger.info(f"\n***** Running {description} *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Batch size = {batch_size}") losses_host: Optional[torch.Tensor] = None preds_host: Union[torch.Tensor, list[torch.Tensor], None] = None labels_host: Union[torch.Tensor, list[torch.Tensor], None] = None inputs_host: Union[torch.Tensor, list[torch.Tensor], None] = None metrics: Optional[dict] = None eval_set_kwargs: dict = {} world_size = max(1, args.world_size) eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) if not prediction_loss_only: # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass # a batch size to the sampler) make_multiple_of = None if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler): make_multiple_of = dataloader.sampler.batch_size preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) model.eval() if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval): self.optimizer.eval() if args.past_index >= 0: self._past = None self.callback_handler.eval_dataloader = dataloader for step, inputs in enumerate(dataloader): loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, "main_input_name", "input_ids") inputs_decode = ( self._prepare_input(inputs[main_input_name]) if "inputs" in args.include_for_metrics else None ) if loss is not None: losses = loss.repeat(batch_size) losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) if logits is not None: preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) if inputs_decode is not None: inputs_host = ( inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) ) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) if self.args.batch_eval_metrics: if self.compute_metrics is not None and preds_host is not None and labels_host is not None: is_last_step = self.accelerator.gradient_state.end_of_dataloader batch_kwargs = {} batch_kwargs["losses"] = losses_host if "loss" in args.include_for_metrics else None batch_kwargs["inputs"] = inputs_host if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics( EvalPrediction(predictions=preds_host, label_ids=labels_host, **batch_kwargs), compute_result=is_last_step, ) if self.args.batch_eval_metrics or ( args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0 ): # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) # Set back to None to begin a new accumulation del losses_host, preds_host, labels_host, inputs_host torch.cuda.empty_cache() losses_host, preds_host, labels_host, inputs_host = None, None, None, None if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) eval_loss = eval_losses_gatherer.finalize() preds = preds_gatherer.finalize() if not prediction_loss_only else None label_ids = labels_gatherer.finalize() if not prediction_loss_only else None inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None if ( self.compute_metrics is not None and preds is not None and label_ids is not None and not self.args.batch_eval_metrics ): eval_set_kwargs["losses"] = eval_loss if "loss" in args.include_for_metrics else None eval_set_kwargs["inputs"] = inputs_ids if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids, **eval_set_kwargs)) elif metrics is None: metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if eval_loss is not None: metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) def _gather_and_numpify(self, tensors, name): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_xla_available(): tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: tensors = distributed_concat(tensors) return nested_numpify(tensors) def _add_sm_patterns_to_gitignore(self) -> None: """Add SageMaker Checkpointing patterns to .gitignore file.""" # Make sure we only do this on the main process if not self.is_world_process_zero(): return patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] # Get current .gitignore content if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): with open(os.path.join(self.repo.local_dir, ".gitignore")) as f: current_content = f.read() else: current_content = "" # Add the patterns to .gitignore content = current_content for pattern in patterns: if pattern not in content: if content.endswith("\n"): content += pattern else: content += f"\n{pattern}" # Write the .gitignore file if it has changed if content != current_content: with 
open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f:
                logger.debug(f"Writing .gitignore file. Content: {content}")
                f.write(content)

        self.repo.git_add(".gitignore")

        # avoid race condition with git status
        time.sleep(0.5)

        if not self.repo.is_repo_clean():
            self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
            self.repo.git_push()

    def create_accelerator_and_postprocess(self):
        # We explicitly don't rely on the `Accelerator` to do gradient accumulation
        grad_acc_kwargs = {}
        if is_accelerate_available("0.28.0") and self.args.accelerator_config.gradient_accumulation_kwargs is not None:
            grad_acc_kwargs = self.args.accelerator_config.gradient_accumulation_kwargs

        # check if num_steps is attempted to be passed in gradient_accumulation_kwargs
        if "num_steps" in grad_acc_kwargs:
            if self.args.gradient_accumulation_steps > 1:
                # raise because we do not know which setting is intended.
                raise ValueError(
                    "The `AcceleratorConfig`'s `num_steps` is set but `gradient_accumulation_steps` is greater than 1 in the passed `TrainingArguments`. "
                    "If using the passed `AcceleratorConfig` is desired, do not set the `TrainingArguments` `gradient_accumulation_steps`."
                )
            else:
                self.args.gradient_accumulation_steps = grad_acc_kwargs["num_steps"]

        accelerator_config = self.args.accelerator_config.to_dict()

        if is_accelerate_available("0.28.0"):
            # Extract dataloader config params from accelerator config
            dataloader_params = ["split_batches", "dispatch_batches", "even_batches", "use_seedable_sampler"]
            dataloader_config = DataLoaderConfiguration(
                **{param: accelerator_config.pop(param) for param in dataloader_params}
            )
            if is_accelerate_available("1.1.0"):
                dataloader_config.data_seed = self.args.data_seed

            non_blocking = accelerator_config.pop("non_blocking")
            if not is_accelerate_available("0.30.0"):
                if non_blocking:
                    raise ImportError(
                        "`non_blocking` is only supported in accelerate v0.30.0 and above. Please upgrade accelerate to use this feature."
                    )
            else:
                if non_blocking and not self.args.dataloader_pin_memory:
                    logger.warning(
                        "`non_blocking` is enabled but `dataloader_pin_memory` is not. For the best performance, it's recommended to enable both."
) dataloader_config.non_blocking = non_blocking # this would have been updated above, no need for it anymore accelerator_config.pop("gradient_accumulation_kwargs") args = { "deepspeed_plugin": self.args.deepspeed_plugin, } if is_accelerate_available("0.28.0"): args["dataloader_config"] = dataloader_config else: args.update(accelerator_config) # tp is initialized at Accelerator init phase so # args should be prepared here if hasattr(self.model, "tp_size") and self.model.tp_size is not None and self.model.tp_size > 1: self.is_tp_enabled = True if version.parse(accelerate_version) > version.parse("1.3.0"): args["torch_tp_plugin"] = TorchTensorParallelPlugin(tp_size=self.model.tp_size) else: raise ValueError("Requires accelerate>1.3.0 to use Tensor Parallelism.") # create accelerator object self.accelerator = Accelerator(**args) # some Trainer classes need to use `gather` instead of `gather_for_metrics`, thus we store a flag self.gather_function = self.accelerator.gather_for_metrics if "use_gather_object" in inspect.signature(self.gather_function).parameters: self.gather_function = functools.partial( self.gather_function, use_gather_object=self.args.eval_use_gather_object ) # deepspeed and accelerate flags covering both trainer args and accelerate launcher self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None self.is_tp_enabled = getattr(self.accelerator.state, "torch_tp_plugin", None) is not None # post accelerator creation setup if self.is_fsdp_enabled: fsdp_plugin = self.accelerator.state.fsdp_plugin for param in ["limit_all_gathers", "activation_checkpointing"]: setattr(fsdp_plugin, param, self.args.fsdp_config.get(param, getattr(fsdp_plugin, param))) if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing: raise ValueError( "The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg " "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic " "when using FSDP." ) if self.is_deepspeed_enabled and getattr(self.args, "hf_deepspeed_config", None) is None: self.propagate_args_to_deepspeed() # `save_only_model` can't be used with DeepSpeed/FSDP along with `load_best_model_at_end` if ( self.args.save_only_model and (self.is_deepspeed_enabled or self.is_fsdp_enabled) and self.args.load_best_model_at_end ): wrapper = "DeepSpeed" if self.is_deepspeed_enabled else "FSDP" raise ValueError(f"{wrapper} can't be used with `save_only_model` along with `load_best_model_at_end`.") # `auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3 if ( self.is_deepspeed_enabled and self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.args.auto_find_batch_size ): raise ValueError( "`auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3. 
Please consider using Zero-2, Zero-1, or FSDP" ) if ( self.args.save_only_model and self.is_fsdp_enabled and "SHARDED_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type) ): raise ValueError("save_only_model option is not compatible with FSDP state dict type 'SHARDED_STATE_DICT'") def propagate_args_to_deepspeed(self, auto_find_batch_size=False): """ Sets values in the deepspeed plugin based on the Trainer args """ from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig ds_plugin = self.accelerator.state.deepspeed_plugin ds_plugin.hf_ds_config = HfTrainerDeepSpeedConfig(ds_plugin.hf_ds_config.config) ds_plugin.deepspeed_config = ds_plugin.hf_ds_config.config ds_plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size) def _fsdp_qlora_plugin_updates(self): if self.is_fsdp_enabled and _is_peft_model(self.model): from peft import PeftConfig from peft.utils.other import fsdp_auto_wrap_policy if isinstance(self.model.active_peft_config, PeftConfig): self.accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(self.model) if ( getattr(self.model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES and self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage.is_floating_point and version.parse(accelerate_version) > version.parse("0.27.0") ): self.accelerator.state.fsdp_plugin.set_mixed_precision( self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage, override=True ) def get_batch_samples( self, epoch_iterator: Iterator, num_batches: int, device: torch.device ) -> tuple[list, Optional[torch.Tensor]]: """ Collects a specified number of batches from the epoch iterator and optionally counts the number of items in the batches to properly scale the loss. """ batch_samples = [] num_items_in_batch = None for _ in range(num_batches): try: batch_samples.append(next(epoch_iterator)) except StopIteration: break count_num_items_in_batch = ( len(batch_samples) > 0 and "labels" in batch_samples[0] and ( # num_items_in_batch is passed to model forward # https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/trainer.py#L3757 self.model_accepts_loss_kwargs # num_items_in_batch is passed to compute_loss_func # https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/trainer.py#L3773 or self.compute_loss_func is not None # num_items_in_batch is also verified if (self.model_accepts_loss_kwargs or self.compute_loss_func) # https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/trainer.py#L3790 ) ) if count_num_items_in_batch: # For now we don't support object detection try: num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples]) except (TypeError, AttributeError): pass if num_items_in_batch is not None: if self.args.average_tokens_across_devices: num_items_in_batch = self.accelerator.gather(num_items_in_batch).sum() if torch.is_tensor(num_items_in_batch): num_items_in_batch = num_items_in_batch.to(device) if self.args.n_gpu > 1 and num_items_in_batch.dim() == 0: # In the DataParallel case, convert the scalar tensor into a 1-dim tensor num_items_in_batch = num_items_in_batch.unsqueeze(0) return batch_samples, num_items_in_batch def set_initial_training_values( self, args: TrainingArguments, dataloader: DataLoader, total_train_batch_size: int ): """ Calculates and returns the following values: - `num_train_epochs` - `num_update_steps_per_epoch` - `num_examples` - `num_train_samples` - `epoch_based` - `len_dataloader` - `max_steps` 
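        For example (illustrative numbers only), with a dataloader of length 1000, `gradient_accumulation_steps=4`
        and `num_train_epochs=3` (with `max_steps < 0`), this yields
        `num_update_steps_per_epoch = max(1000 // 4 + int(1000 % 4 > 0), 1) = 250` and
        `max_steps = ceil(3 * 250) = 750`.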
""" # Case 1: we rely on `args.max_steps` first max_steps = args.max_steps # If max_steps is negative, we use the number of epochs to determine the number of total steps later epoch_based = max_steps < 0 len_dataloader = len(dataloader) if has_length(dataloader) else None # Case 2: We have a dataloader length and can extrapolate if len_dataloader is not None: num_update_steps_per_epoch = max( len_dataloader // args.gradient_accumulation_steps + int(len_dataloader % args.gradient_accumulation_steps > 0), 1, ) # Case 3: We have a length but are using epochs, we can extrapolate the number of steps if epoch_based: max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) # Now we figure out `num_examples`, `num_train_epochs`, and `train_samples` if len_dataloader: num_examples = self.num_examples(dataloader) if args.max_steps > 0: num_train_epochs = max_steps // num_update_steps_per_epoch + int( max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = max_steps * total_train_batch_size else: num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples(dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size # Setting a very large number of epochs so we go as many times as necessary over the iterator. num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) return ( num_train_epochs, num_update_steps_per_epoch, num_examples, num_train_samples, epoch_based, len_dataloader, max_steps, )
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Doc utilities: Utilities related to documentation """ import functools import inspect import re import textwrap import types from collections import OrderedDict def get_docstring_indentation_level(func): """Return the indentation level of the start of the docstring of a class or function (or method).""" # We assume classes are always defined in the global scope if inspect.isclass(func): return 4 source = inspect.getsource(func) first_line = source.splitlines()[0] function_def_level = len(first_line) - len(first_line.lstrip()) return 4 + function_def_level def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): class_name = f"[`{fn.__qualname__.split('.')[0]}`]" intro = rf""" The {class_name} forward method, overrides the `__call__` special method. <Tip> Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. </Tip> """ correct_indentation = get_docstring_indentation_level(fn) current_doc = fn.__doc__ if fn.__doc__ is not None else "" try: first_non_empty = next(line for line in current_doc.splitlines() if line.strip() != "") doc_indentation = len(first_non_empty) - len(first_non_empty.lstrip()) except StopIteration: doc_indentation = correct_indentation docs = docstr # In this case, the correct indentation level (class method, 2 Python levels) was respected, and we should # correctly reindent everything. Otherwise, the doc uses a single indentation level if doc_indentation == 4 + correct_indentation: docs = [textwrap.indent(textwrap.dedent(doc), " " * correct_indentation) for doc in docstr] intro = textwrap.indent(textwrap.dedent(intro), " " * correct_indentation) docstring = "".join(docs) + current_doc fn.__doc__ = intro + docstring return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ TF_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. 
""" def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class, min_indent=None, add_intro=True): """ Prepares the return part of the docstring using `output_type`. """ output_docstring = output_type.__doc__ params_docstring = None if output_docstring is not None: # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) elif add_intro: raise ValueError( f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it has " "docstring and contain either `Args` or `Parameters`." ) # Add the return introduction if add_intro: full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) else: full_output_type = str(output_type) intro = f"\nReturns:\n `{full_output_type}`" if params_docstring is not None: intro += ":\n" result = intro if params_docstring is not None: result += params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result FAKE_MODEL_DISCLAIMER = """ <Tip warning={true}> This example uses a random model as the real ones are all very big. To get proper results, you should use {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try adding `device_map="auto"` in the `from_pretrained` call. </Tip> """ PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather than input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes {expected_output} >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True) {expected_output} >>> # target is "nice puppet" >>> target_start_index = torch.tensor([{qa_target_start_index}]) >>> target_end_index = torch.tensor([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] {expected_output} >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = torch.tensor([1]) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` Example of multi-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5] >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained( ... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification" ... ) >>> labels = torch.sum( ... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1 ...
).to(torch.float) >>> loss = model(**inputs, labels=labels).loss ``` """ PT_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) {expected_loss} ``` """ PT_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label {expected_output} >>> # compute loss - target_label is e.g. "down" >>> target_label = model.config.id2label[0] >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_FRAME_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> probabilities = torch.sigmoid(logits[0]) >>> # labels is a one-hot array of shape (num_frames, num_speakers) >>> labels = (probabilities > 0.5).long() >>> labels[0].tolist() {expected_output} ``` """ PT_SPEECH_XVECTOR_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor( ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ... ) >>> with torch.no_grad(): ... embeddings = model(**inputs).embeddings >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() >>> # the resulting embeddings can be used for cosine similarity-based retrieval >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) >>> similarity = cosine_sim(embeddings[0], embeddings[1]) >>> threshold = 0.7 # the optimal threshold is dataset-dependent >>> if similarity < threshold: ... print("Speakers are not the same!") >>> round(similarity.item(), 2) {expected_output} ``` """ PT_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TEXT_TO_AUDIO_SPECTROGRAM_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class}, SpeechT5HifiGan >>> model = {model_class}.from_pretrained("{checkpoint}") >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt") >>> # generate speech >>> speech = model.generate(inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder) ``` """ TEXT_TO_AUDIO_WAVEFORM_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> model = {model_class}.from_pretrained("{checkpoint}") >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt") >>> # generate speech >>> speech = model(inputs["input_ids"]) ``` """ AUDIO_FRAME_CLASSIFICATION_SAMPLE = PT_SPEECH_FRAME_CLASS_SAMPLE AUDIO_XVECTOR_SAMPLE = PT_SPEECH_XVECTOR_SAMPLE IMAGE_TO_TEXT_SAMPLE = r""" Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, {model_class} >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) ``` """ DEPTH_ESTIMATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> model.to(device) >>> # prepare image for the model >>> inputs = processor(images=image, return_tensors="pt").to(device) >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # interpolate to original size >>> post_processed_output = processor.post_process_depth_estimation( ... outputs, [(image.height, image.width)], ... 
) >>> predicted_depth = post_processed_output[0]["predicted_depth"] ``` """ VIDEO_CLASSIFICATION_SAMPLE = r""" Example: ```python ``` """ ZERO_SHOT_OBJECT_DETECTION_SAMPLE = r""" Example: ```python ``` """ IMAGE_TO_IMAGE_SAMPLE = r""" Example: ```python ``` """ IMAGE_FEATURE_EXTRACTION_SAMPLE = r""" Example: ```python ``` """ DOCUMENT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python ``` """ NEXT_SENTENCE_PREDICTION_SAMPLE = r""" Example: ```python ``` """ MULTIPLE_CHOICE_SAMPLE = PT_MULTIPLE_CHOICE_SAMPLE PRETRAINING_SAMPLE = r""" Example: ```python ``` """ MASK_GENERATION_SAMPLE = r""" Example: ```python ``` """ VISUAL_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python ``` """ TEXT_GENERATION_SAMPLE = r""" Example: ```python ``` """ IMAGE_CLASSIFICATION_SAMPLE = PT_VISION_SEQ_CLASS_SAMPLE IMAGE_SEGMENTATION_SAMPLE = r""" Example: ```python ``` """ FILL_MASK_SAMPLE = r""" Example: ```python ``` """ OBJECT_DETECTION_SAMPLE = r""" Example: ```python ``` """ QUESTION_ANSWERING_SAMPLE = PT_QUESTION_ANSWERING_SAMPLE TEXT2TEXT_GENERATION_SAMPLE = r""" Example: ```python ``` """ TEXT_CLASSIFICATION_SAMPLE = PT_SEQUENCE_CLASSIFICATION_SAMPLE TABLE_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python ``` """ TOKEN_CLASSIFICATION_SAMPLE = PT_TOKEN_CLASSIFICATION_SAMPLE AUDIO_CLASSIFICATION_SAMPLE = PT_SPEECH_SEQ_CLASS_SAMPLE AUTOMATIC_SPEECH_RECOGNITION_SAMPLE = PT_SPEECH_CTC_SAMPLE ZERO_SHOT_IMAGE_CLASSIFICATION_SAMPLE = r""" Example: ```python ``` """ IMAGE_TEXT_TO_TEXT_GENERATION_SAMPLE = r""" Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, {model_class} >>> model = {model_class}.from_pretrained("{checkpoint}") >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> messages = [ ... {{ ... "role": "user", "content": [ ... {{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"}}, ... {{"type": "text", "text": "Where is the cat standing?"}}, ... ] ... }}, ... ] >>> inputs = processor.apply_chat_template( ... messages, ... tokenize=True, ... return_dict=True, ... return_tensors="pt", ... add_generation_prompt=True ... 
) >>> # Generate >>> generate_ids = model.generate(**inputs) >>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0] ``` """ PIPELINE_TASKS_TO_SAMPLE_DOCSTRINGS = OrderedDict( [ ("text-to-audio-spectrogram", TEXT_TO_AUDIO_SPECTROGRAM_SAMPLE), ("text-to-audio-waveform", TEXT_TO_AUDIO_WAVEFORM_SAMPLE), ("automatic-speech-recognition", AUTOMATIC_SPEECH_RECOGNITION_SAMPLE), ("audio-frame-classification", AUDIO_FRAME_CLASSIFICATION_SAMPLE), ("audio-classification", AUDIO_CLASSIFICATION_SAMPLE), ("audio-xvector", AUDIO_XVECTOR_SAMPLE), ("image-text-to-text", IMAGE_TEXT_TO_TEXT_GENERATION_SAMPLE), ("image-to-text", IMAGE_TO_TEXT_SAMPLE), ("visual-question-answering", VISUAL_QUESTION_ANSWERING_SAMPLE), ("depth-estimation", DEPTH_ESTIMATION_SAMPLE), ("video-classification", VIDEO_CLASSIFICATION_SAMPLE), ("zero-shot-image-classification", ZERO_SHOT_IMAGE_CLASSIFICATION_SAMPLE), ("image-classification", IMAGE_CLASSIFICATION_SAMPLE), ("zero-shot-object-detection", ZERO_SHOT_OBJECT_DETECTION_SAMPLE), ("object-detection", OBJECT_DETECTION_SAMPLE), ("image-segmentation", IMAGE_SEGMENTATION_SAMPLE), ("image-to-image", IMAGE_TO_IMAGE_SAMPLE), ("image-feature-extraction", IMAGE_FEATURE_EXTRACTION_SAMPLE), ("text-generation", TEXT_GENERATION_SAMPLE), ("table-question-answering", TABLE_QUESTION_ANSWERING_SAMPLE), ("document-question-answering", DOCUMENT_QUESTION_ANSWERING_SAMPLE), ("question-answering", QUESTION_ANSWERING_SAMPLE), ("text2text-generation", TEXT2TEXT_GENERATION_SAMPLE), ("next-sentence-prediction", NEXT_SENTENCE_PREDICTION_SAMPLE), ("multiple-choice", MULTIPLE_CHOICE_SAMPLE), ("text-classification", TEXT_CLASSIFICATION_SAMPLE), ("token-classification", TOKEN_CLASSIFICATION_SAMPLE), ("fill-mask", FILL_MASK_SAMPLE), ("mask-generation", MASK_GENERATION_SAMPLE), ("pretraining", PRETRAINING_SAMPLE), ] ) # Ordered dict to look for more specialized model mappings first # before falling back to the more generic ones. 
MODELS_TO_PIPELINE = OrderedDict( [ # Audio ("MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES", "text-to-audio-spectrogram"), ("MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES", "text-to-audio-waveform"), ("MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "automatic-speech-recognition"), ("MODEL_FOR_CTC_MAPPING_NAMES", "automatic-speech-recognition"), ("MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "audio-frame-classification"), ("MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "audio-classification"), ("MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "audio-xvector"), # Vision ("MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES", "image-text-to-text"), ("MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "image-to-text"), ("MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "visual-question-answering"), ("MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "depth-estimation"), ("MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "video-classification"), ("MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "zero-shot-image-classification"), ("MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "image-classification"), ("MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "zero-shot-object-detection"), ("MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "object-detection"), ("MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "image-segmentation"), ("MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "image-to-image"), ("MODEL_FOR_IMAGE_MAPPING_NAMES", "image-feature-extraction"), # Text/tokens ("MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "text-generation"), ("MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "table-question-answering"), ("MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "document-question-answering"), ("MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "question-answering"), ("MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "text2text-generation"), ("MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "next-sentence-prediction"), ("MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "multiple-choice"), ("MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "text-classification"), ("MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "token-classification"), ("MODEL_FOR_MASKED_LM_MAPPING_NAMES", "fill-mask"), ("MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "mask-generation"), ("MODEL_FOR_PRETRAINING_MAPPING_NAMES", "pretraining"), ] ) TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf" ... ) >>> logits = model(**inputs).logits >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1) >>> # Note that tokens are classified rather than input words which means that >>> # there might be more predicted token classes than words.
>>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()] >>> predicted_tokens_classes {expected_output} ``` ```python >>> labels = predicted_token_class_ids >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="tf") >>> outputs = model(**inputs) >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0]) >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0]) >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens) {expected_output} ``` ```python >>> # target is "nice puppet" >>> target_start_index = tf.constant([{qa_target_start_index}]) >>> target_end_index = tf.constant([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = tf.math.reduce_mean(outputs.loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] {expected_output} ``` ```python >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = tf.constant(1) >>> loss = model(**inputs, labels=labels).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0]) >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index) >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) {expected_loss} ``` """ TF_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = 
AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits ``` """ TF_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits ``` """ TF_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import tensorflow as tf >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_ids = tf.math.argmax(logits, axis=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} ``` ```python >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = 
{model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import tensorflow as tf >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, "CTC": TF_SPEECH_CTC_SAMPLE, "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="jax") >>> outputs = model(**inputs) >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}}) >>> logits = outputs.logits ``` """ FLAX_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") >>> outputs = model(**inputs) >>> # retrieve logits for next token >>> next_token_logits = outputs.logits[:, -1] ``` """ FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def filter_outputs_from_example(docstring, **kwargs): """ Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`. """ for key, value in kwargs.items(): if value is not None: continue doc_key = "{" + key + "}" docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring) return docstring def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output=None, expected_loss=None, real_checkpoint=None, revision=None, ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the `.format(**doc_kwargs)`. Note that the string might # be formatted with non-existing keys, which is fine. doc_kwargs = { "model_class": model_class, "processor_class": processor_class, "checkpoint": checkpoint, "mask": mask, "qa_target_start_index": qa_target_start_index, "qa_target_end_index": qa_target_end_index, "expected_output": expected_output, "expected_loss": expected_loss, "real_checkpoint": real_checkpoint, "fake_checkpoint": checkpoint, "true": "{true}", # For <Tip warning={true}> syntax that conflicts with formatting.
} if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio": code_sample = sample_docstrings["AudioClassification"] elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] elif "TokenClassification" in model_class: code_sample = sample_docstrings["TokenClassification"] elif "MultipleChoice" in model_class: code_sample = sample_docstrings["MultipleChoice"] elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] elif "CTC" in model_class: code_sample = sample_docstrings["CTC"] elif "AudioFrameClassification" in model_class: code_sample = sample_docstrings["AudioFrameClassification"] elif "XVector" in model_class and modality == "audio": code_sample = sample_docstrings["AudioXVector"] elif "Model" in model_class and modality == "audio": code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class and modality == "vision": code_sample = sample_docstrings["VisionBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] elif "ImageClassification" in model_class: code_sample = sample_docstrings["ImageClassification"] else: raise ValueError(f"Docstring can't be built for model {model_class}") code_sample = filter_outputs_from_example( code_sample, expected_output=expected_output, expected_loss=expected_loss ) if real_checkpoint is not None: code_sample = FAKE_MODEL_DISCLAIMER + code_sample func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class) built_doc = code_sample.format(**doc_kwargs) if revision is not None: if re.match(r"^refs/pr/\\d+", revision): raise ValueError( f"The provided revision '{revision}' is incorrect. It should point to" " a pull request reference on the hub like 'refs/pr/6'" ) built_doc = built_doc.replace( f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")' ) fn.__doc__ = func_doc + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = len(_get_indent(lines[i])) lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ImageProcessingMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BaseImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageFeatureExtractionMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"])
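# A minimal sketch of how these dummies behave: when the "vision" backend (Pillow)
# is missing, imports resolve to these placeholder classes, and instantiating one
# raises an ImportError through `requires_backends`. This assumes Pillow is absent.
try:
    BaseImageProcessor()
except ImportError as err:
    print(err)  # the message explains how to install the missing "vision" backend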
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import warnings from collections.abc import Iterable from contextlib import redirect_stdout from dataclasses import dataclass from io import BytesIO from typing import Callable, Optional, Union from urllib.parse import urlparse import numpy as np import requests from .image_transforms import PaddingMode, to_channel_dimension_format from .image_utils import ChannelDimension, infer_channel_dimension_format, is_valid_image from .utils import ( is_av_available, is_cv2_available, is_decord_available, is_numpy_array, is_torch_available, is_torch_tensor, is_torchcodec_available, is_torchvision_available, is_vision_available, is_yt_dlp_available, logging, requires_backends, ) if is_vision_available(): import PIL.Image import PIL.ImageOps if is_torchvision_available(): from torchvision import io as torchvision_io if is_torch_available(): import torch logger = logging.get_logger(__name__) VideoInput = Union[ list["PIL.Image.Image"], "np.ndarray", "torch.Tensor", list["np.ndarray"], list["torch.Tensor"], list[list["PIL.Image.Image"]], list[list["np.ndarray"]], list[list["torch.Tensor"]], ] # noqa @dataclass class VideoMetadata: total_num_frames: int fps: float duration: float video_backend: str def __getitem__(self, item): return getattr(self, item) def is_valid_video_frame(frame): return isinstance(frame, PIL.Image.Image) or ( (is_numpy_array(frame) or is_torch_tensor(frame)) and frame.ndim == 3 ) def is_valid_video(video): if not isinstance(video, (list, tuple)): return (is_numpy_array(video) or is_torch_tensor(video)) and video.ndim == 4 return all(is_valid_video_frame(frame) for frame in video) def valid_videos(videos): # If we have a list of videos, it could be either one video as list of frames or a batch if isinstance(videos, (list, tuple)): for video_or_frame in videos: if not (is_valid_video(video_or_frame) or is_valid_video_frame(video_or_frame)): return False # If not a list, then we have a single 4D video or 5D batched tensor elif not (is_valid_video(videos) or videos.ndim == 5): return False return True def is_batched_video(videos): if isinstance(videos, (list, tuple)): return is_valid_video(videos[0]) elif (is_numpy_array(videos) or is_torch_tensor(videos)) and videos.ndim == 5: return True return False def is_scaled_video(video: np.ndarray) -> bool: """ Checks to see whether the pixel values have already been rescaled to [0, 1]. """ # It's possible the video has pixel values in [0, 255] but is of floating type return np.min(video) >= 0 and np.max(video) <= 1 def convert_pil_frames_to_video(videos: list[VideoInput]) -> list[Union["np.ndarray", "torch.Tensor"]]: """ Given a batch of videos, converts each video to a 4D array. If video is already in array type, it is simply returned. We assume that all inputs in the list are in the same format, based on the type of the first element. Args: videos (`VideoInput`): Video inputs to turn into a list of videos.
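    Example (a minimal sketch; `frames` is a hypothetical list of PIL frames):

        import PIL.Image

        frames = [PIL.Image.new("RGB", (32, 32)) for _ in range(4)]
        videos = convert_pil_frames_to_video([frames])
        # videos[0] is a numpy array of shape (4, 32, 32, 3)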
""" if not isinstance(videos[0], (list, tuple)): return videos video_converted = [] for video in videos: video = [np.array(frame) for frame in video] video = np.stack(video) video_converted.append(video) return video_converted def make_batched_videos(videos) -> list[Union["np.ndarray", "torch.Tensor"]]: """ Ensure that the input is a list of videos. If the input is a single video, it is converted to a list of length 1. If the input is a batch of videos, it is converted to a list of 4D video arrays. Videos passed as list `PIL.Image` frames are converted to 4D arrays. We assume that all inputs in the list are in the same format, based on the type of the first element. Args: videos (`VideoInput`): Video inputs to turn into a list of videos. """ if not valid_videos: raise ValueError( f"Invalid video input. Expected either a list of video frames or an input of 4 or 5 dimensions, but got" f" type {type(videos)}." ) if is_batched_video(videos): pass elif is_valid_video(videos): videos = [videos] # only one frame passed, thus we unsqueeze time dim elif is_valid_image(videos): videos = [np.array(videos)[None, ...]] # nested batch so we need to unflatten elif isinstance(videos[0], (list, tuple)) and is_valid_video(videos[0][0]): videos = [video for sublist in videos for video in sublist] return convert_pil_frames_to_video(videos) def get_video_size(video: np.ndarray, channel_dim: ChannelDimension = None) -> tuple[int, int]: """ Returns the (height, width) dimensions of the video. Args: video (`np.ndarray`): The video to get the dimensions of. channel_dim (`ChannelDimension`, *optional*): Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the video. Returns: A tuple of the video's height and width. """ if channel_dim is None: channel_dim = infer_channel_dimension_format(video) if channel_dim == ChannelDimension.FIRST: return video.shape[-2], video.shape[-1] elif channel_dim == ChannelDimension.LAST: return video.shape[-3], video.shape[-2] else: raise ValueError(f"Unsupported data format: {channel_dim}") def get_uniform_frame_indices(total_num_frames: int, num_frames: Optional[int] = None): """ Creates a numpy array for uniform sampling of `num_frame` frames from `total_num_frames` when loading a video. Args: total_num_frames (`int`): Total number of frames that a video has. num_frames (`int`, *optional*): Number of frames to sample uniformly. If not specified, all frames are sampled. Returns: np.ndarray: np array of frame indices that will be sampled. """ if num_frames is not None: indices = np.arange(0, total_num_frames, total_num_frames / num_frames).astype(int) else: indices = np.arange(0, total_num_frames).astype(int) return indices def default_sample_indices_fn(metadata: VideoMetadata, num_frames=None, fps=None, **kwargs): """ A default sampling function that replicates the logic used in get_uniform_frame_indices, while optionally handling `fps` if `num_frames` is not provided. Args: metadata (`VideoMetadata`): `VideoMetadata` object containing metadata about the video, such as "total_num_frames" or "fps". num_frames (`int`, *optional*): Number of frames to sample uniformly. fps (`int` or `float`, *optional*): Desired frames per second. Takes priority over num_frames if both are provided. Returns: `np.ndarray`: Array of frame indices to sample. 
""" total_num_frames = metadata.total_num_frames video_fps = metadata.fps # If num_frames is not given but fps is, calculate num_frames from fps if num_frames is None and fps is not None: num_frames = int(total_num_frames / video_fps * fps) if num_frames > total_num_frames: raise ValueError( f"When loading the video with fps={fps}, we computed num_frames={num_frames} " f"which exceeds total_num_frames={total_num_frames}. Check fps or video metadata." ) if num_frames is not None: indices = np.arange(0, total_num_frames, total_num_frames / num_frames, dtype=int) else: indices = np.arange(0, total_num_frames, dtype=int) return indices def read_video_opencv( video_path: str, sample_indices_fn: Callable, **kwargs, ): """ Decode a video using the OpenCV backend. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: tuple[`np.array`, `VideoMetadata`]: A tuple containing: - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object. """ # Lazy import cv2 requires_backends(read_video_opencv, ["cv2"]) import cv2 video = cv2.VideoCapture(video_path) total_num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) video_fps = video.get(cv2.CAP_PROP_FPS) duration = total_num_frames / video_fps if video_fps else 0 metadata = VideoMetadata( total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend="opencv" ) indices = sample_indices_fn(metadata=metadata, **kwargs) index = 0 frames = [] while video.isOpened(): success, frame = video.read() if not success: break if index in indices: height, width, channel = frame.shape frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frames.append(frame[0:height, 0:width, 0:channel]) if success: index += 1 if index >= total_num_frames: break video.release() metadata.frames_indices = indices return np.stack(frames), metadata def read_video_decord( video_path: str, sample_indices_fn: Optional[Callable] = None, **kwargs, ): """ Decode a video using the Decord backend. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: tuple[`np.array`, `VideoMetadata`]: A tuple containing: - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object. 
""" # Lazy import from decord requires_backends(read_video_decord, ["decord"]) from decord import VideoReader, cpu vr = VideoReader(uri=video_path, ctx=cpu(0)) # decord has problems with gpu video_fps = vr.get_avg_fps() total_num_frames = len(vr) duration = total_num_frames / video_fps if video_fps else 0 metadata = VideoMetadata( total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend="decord" ) indices = sample_indices_fn(metadata=metadata, **kwargs) frames = vr.get_batch(indices).asnumpy() metadata.frames_indices = indices return frames, metadata def read_video_pyav( video_path: str, sample_indices_fn: Callable, **kwargs, ): """ Decode the video with PyAV decoder. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: tuple[`np.array`, `VideoMetadata`]: A tuple containing: - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object. """ # Lazy import av requires_backends(read_video_pyav, ["av"]) import av container = av.open(video_path) total_num_frames = container.streams.video[0].frames video_fps = container.streams.video[0].average_rate # should we better use `av_guess_frame_rate`? duration = total_num_frames / video_fps if video_fps else 0 metadata = VideoMetadata( total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend="pyav" ) indices = sample_indices_fn(metadata=metadata, **kwargs) frames = [] container.seek(0) end_index = indices[-1] for i, frame in enumerate(container.decode(video=0)): if i > end_index: break if i >= 0 and i in indices: frames.append(frame) video = np.stack([x.to_ndarray(format="rgb24") for x in frames]) metadata.frames_indices = indices return video, metadata def read_video_torchvision( video_path: str, sample_indices_fn: Callable, **kwargs, ): """ Decode the video with torchvision decoder. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: tuple[`np.array`, `VideoMetadata`]: A tuple containing: - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object. """ warnings.warn( "Using `torchvision` for video decoding is deprecated and will be removed in future versions. " "Please use `torchcodec` instead." 
) video, _, info = torchvision_io.read_video( video_path, start_pts=0.0, end_pts=None, pts_unit="sec", output_format="THWC", ) video_fps = info["video_fps"] total_num_frames = video.size(0) duration = total_num_frames / video_fps if video_fps else 0 metadata = VideoMetadata( total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend="torchvision", ) indices = sample_indices_fn(metadata=metadata, **kwargs) video = video[indices].contiguous().numpy() metadata.frames_indices = indices return video, metadata def read_video_torchcodec( video_path: str, sample_indices_fn: Callable, **kwargs, ): """ Decode the video with torchcodec decoder. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: Tuple[`torch.Tensor`, `VideoMetadata`]: A tuple containing: - Tensor of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object. """ # Lazy import torchcodec requires_backends(read_video_torchcodec, ["torchcodec"]) from torchcodec.decoders import VideoDecoder decoder = VideoDecoder( video_path, dimension_order="NHWC", # to be consistent with other decoders # Interestingly, `exact` mode takes less time than `approximate` when we load the whole video seek_mode="exact", ) metadata = VideoMetadata( total_num_frames=decoder.metadata.num_frames, fps=decoder.metadata.average_fps, duration=decoder.metadata.duration_seconds, video_backend="torchcodec", ) indices = sample_indices_fn(metadata=metadata, **kwargs) video = decoder.get_frames_at(indices=indices).data.contiguous() metadata.frames_indices = indices return video, metadata VIDEO_DECODERS = { "decord": read_video_decord, "opencv": read_video_opencv, "pyav": read_video_pyav, "torchvision": read_video_torchvision, "torchcodec": read_video_torchcodec, } def load_video( video: Union[str, "VideoInput"], num_frames: Optional[int] = None, fps: Optional[Union[int, float]] = None, backend: str = "pyav", sample_indices_fn: Optional[Callable] = None, **kwargs, ) -> np.array: """ Loads `video` to a numpy array. Args: video (`str` or `VideoInput`): The video to convert to the numpy array format. Can be a link to video or local path. num_frames (`int`, *optional*): Number of frames to sample uniformly. If not passed, the whole video is loaded. fps (`int` or `float`, *optional*): Number of frames to sample per second. Should be passed only when `num_frames=None`. If not specified and `num_frames==None`, all frames are sampled. backend (`str`, *optional*, defaults to `"pyav"`): The backend to use when loading the video. Can be any of ["decord", "pyav", "opencv", "torchvision", "torchcodec"]. Defaults to "pyav". sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed; otherwise `sample_indices_fn` has priority over other args.
            The function expects as input all the args along with all the kwargs passed to `load_video` and should
            output valid indices at which the video should be sampled. For example:

            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        tuple[`np.array`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object (or `None` when the input is already decoded frames).
    """
    # If `sample_indices_fn` is given, we can accept any args as those might be needed by custom `sample_indices_fn`
    if fps is not None and num_frames is not None and sample_indices_fn is None:
        raise ValueError(
            "`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!"
        )

    # If user didn't pass a sampling function, create one on the fly with default logic
    if sample_indices_fn is None:

        def sample_indices_fn_func(metadata, **fn_kwargs):
            return default_sample_indices_fn(metadata, num_frames=num_frames, fps=fps, **fn_kwargs)

        sample_indices_fn = sample_indices_fn_func

    if is_valid_image(video) or (isinstance(video, (list, tuple)) and is_valid_image(video[0])):
        # Case 1: Video is provided as a 4D numpy array or torch tensor (frames, height, width, channels)
        if not is_valid_video(video):
            raise ValueError(
                f"When passing video as decoded frames, video should be a 4D numpy array or torch tensor, but got {video.ndim} dimensions instead."
            )
        return video, None

    if urlparse(video).netloc in ["www.youtube.com", "youtube.com"]:
        if not is_yt_dlp_available():
            raise ImportError("To load a video from YouTube url you have to install `yt_dlp` first.")
        # Lazy import from yt_dlp
        requires_backends(load_video, ["yt_dlp"])
        from yt_dlp import YoutubeDL

        buffer = BytesIO()
        with redirect_stdout(buffer), YoutubeDL() as f:
            f.download([video])
        bytes_obj = buffer.getvalue()
        file_obj = BytesIO(bytes_obj)
    elif video.startswith("http://") or video.startswith("https://"):
        file_obj = BytesIO(requests.get(video).content)
    elif os.path.isfile(video):
        file_obj = video
    else:
        raise TypeError("Incorrect format used for video. Should be a URL linking to a video or a local path.")

    # can also load with decord, but not cv2/torchvision
    # both will fail in case of url links
    video_is_url = video.startswith("http://") or video.startswith("https://")
    if video_is_url and backend in ["opencv", "torchvision"]:
        raise ValueError(
            "If you are trying to load a video from URL, you can decode the video only with `pyav`, `decord` or `torchcodec` as backend"
        )

    if file_obj is None:
        return video

    if (
        (not is_decord_available() and backend == "decord")
        or (not is_av_available() and backend == "pyav")
        or (not is_cv2_available() and backend == "opencv")
        or (not is_torchvision_available() and backend == "torchvision")
        or (not is_torchcodec_available() and backend == "torchcodec")
    ):
        raise ImportError(
            f"You chose backend={backend} for loading the video but the required library is not found in your environment. "
            f"Make sure to install {backend} before loading the video."
        )

    video_decoder = VIDEO_DECODERS[backend]
    video, metadata = video_decoder(file_obj, sample_indices_fn, **kwargs)
    return video, metadata


def convert_to_rgb(
    video: np.array,
    data_format: Optional[ChannelDimension] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.array:
    """
    Convert video to RGB by blending the transparency layer if it's in RGBA format, otherwise simply returns it.

    Args:
        video (`np.array`):
            The video to convert.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output video. If unset, will use the inferred format from the input.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input video. If unset, will use the inferred format from the input.
    """
    if not isinstance(video, np.ndarray):
        raise TypeError(f"Video has to be a numpy array to convert to RGB format, but found {type(video)}")

    # np.array usually comes with ChannelDimension.LAST so let's convert it
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(video)
    video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)

    # 3 channels for RGB already
    if video.shape[-3] == 3:
        return video

    # Grayscale video so we repeat it 3 times for each channel
    if video.shape[-3] == 1:
        return video.repeat(3, -3)

    # Fully opaque alpha channel, nothing to blend
    if not (video[..., 3, :, :] < 255).any():
        return video

    # There is a transparency layer, blend the RGB channels with a white background.
    # Calculate the alpha proportion for blending.
    alpha = video[..., 3, :, :] / 255.0
    video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]
    return video


def pad(
    video: np.ndarray,
    padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]],
    mode: PaddingMode = PaddingMode.CONSTANT,
    constant_values: Union[float, Iterable[float]] = 0.0,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
    """
    Pads the `video` with the specified (height, width) `padding` and `mode`.

    Args:
        video (`np.ndarray`):
            The video to pad.
        padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
            Padding to apply to the edges of the height, width axes. Can be one of three formats:
            - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
            - `((before, after),)` yields same before and after pad for height and width.
            - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
        mode (`PaddingMode`):
            The padding mode to use. Can be one of:
                - `"constant"`: pads with a constant value.
                - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
                  vector along each axis.
                - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
                - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
        constant_values (`float` or `Iterable[float]`, *optional*):
            The value to use for the padding if `mode` is `"constant"`.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the output video. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_frames, num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: video in (num_frames, height, width, num_channels) format.
            If unset, will use same as the input video.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the input video. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_frames, num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: video in (num_frames, height, width, num_channels) format.
            If unset, will use the inferred format of the input video.

    Returns:
        `np.ndarray`: The padded video.
""" if input_data_format is None: input_data_format = infer_channel_dimension_format(video) def _expand_for_data_format(values): """ Convert values to be in the format expected by np.pad based on the data format. """ if isinstance(values, (int, float)): values = ((values, values), (values, values)) elif isinstance(values, tuple) and len(values) == 1: values = ((values[0], values[0]), (values[0], values[0])) elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int): values = (values, values) elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple): pass else: raise ValueError(f"Unsupported format: {values}") # add 0 for channel dimension values = ( ((0, 0), (0, 0), *values) if input_data_format == ChannelDimension.FIRST else ((0, 0), *values, (0, 0)) ) # Add additional padding if there's a batch dimension values = (0, *values) if video.ndim == 5 else values return values padding_map = { PaddingMode.CONSTANT: "constant", PaddingMode.REFLECT: "reflect", PaddingMode.REPLICATE: "replicate", PaddingMode.SYMMETRIC: "symmetric", } padding = _expand_for_data_format(padding) pad_kwargs = {} if mode not in padding_map: raise ValueError(f"Invalid padding mode: {mode}") elif mode == PaddingMode.CONSTANT: pad_kwargs["constant_values"] = _expand_for_data_format(constant_values) video = np.pad(video, padding, mode=padding_map[mode], **pad_kwargs) video = to_channel_dimension_format(video, data_format, input_data_format) if data_format is not None else video return video def group_videos_by_shape( videos: list["torch.Tensor"], ) -> tuple[dict[tuple[int, int], list["torch.Tensor"]], dict[int, tuple[tuple[int, int], int]]]: """ Groups videos by shape. Returns a dictionary with the shape as key and a list of videos with that shape as value, and a dictionary with the index of the video in the original list as key and the shape and index in the grouped list as value. """ grouped_videos = {} grouped_videos_index = {} for i, video in enumerate(videos): shape = video.shape[-2::] num_frames = video.shape[-4] # video format BTCHW shape = (num_frames, *shape) if shape not in grouped_videos: grouped_videos[shape] = [] grouped_videos[shape].append(video) grouped_videos_index[i] = (shape, len(grouped_videos[shape]) - 1) # stack videos with the same size and number of frames grouped_videos = {shape: torch.stack(videos, dim=0) for shape, videos in grouped_videos.items()} return grouped_videos, grouped_videos_index def reorder_videos( processed_videos: dict[tuple[int, int], "torch.Tensor"], grouped_videos_index: dict[int, tuple[int, int]] ) -> list["torch.Tensor"]: """ Reconstructs a list of videos in the original order. """ return [ processed_videos[grouped_videos_index[i][0]][grouped_videos_index[i][1]] for i in range(len(grouped_videos_index)) ]
transformers/src/transformers/video_utils.py/0
{ "file_path": "transformers/src/transformers/video_utils.py", "repo_id": "transformers", "token_count": 11972 }
555
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Aria model.""" import unittest import pytest import requests from transformers import ( AriaConfig, AriaForConditionalGeneration, AriaModel, AriaTextConfig, AutoProcessor, AutoTokenizer, BitsAndBytesConfig, is_torch_available, is_vision_available, ) from transformers.models.idefics3 import Idefics3VisionConfig from transformers.testing_utils import ( Expectations, cleanup, require_bitsandbytes, require_torch, require_torch_large_accelerator, require_vision, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image # Used to be https://aria-vl.github.io/static/images/view.jpg but it was removed, llava-vl has the same image IMAGE_OF_VIEW_URL = "https://llava-vl.github.io/static/images/view.jpg" class AriaVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=9, projector_hidden_act="gelu", seq_length=7, vision_feature_select_strategy="default", vision_feature_layer=-1, text_config=AriaTextConfig( seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=1, hidden_size=32, intermediate_size=64, max_position_embeddings=60, model_type="aria_moe_lm", moe_intermediate_size=4, moe_num_experts=4, moe_topk=2, num_attention_heads=8, num_experts_per_tok=3, num_hidden_layers=2, num_key_value_heads=8, rope_theta=5000000, vocab_size=99, eos_token_id=2, head_dim=4, ), is_training=True, vision_config=Idefics3VisionConfig( image_size=358, patch_size=10, num_channels=3, is_training=True, hidden_size=32, projection_dim=20, num_hidden_layers=2, num_attention_heads=16, intermediate_size=10, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, ), ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.pad_token_id = text_config.pad_token_id self.eos_token_id = text_config.eos_token_id self.num_hidden_layers = text_config.num_hidden_layers self.vocab_size = text_config.vocab_size self.hidden_size = text_config.hidden_size self.num_attention_heads = text_config.num_attention_heads self.is_training = is_training self.batch_size = 10 self.num_channels = 3 self.image_size = 358 self.num_image_tokens = 128 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return AriaConfig( 
            text_config=self.text_config.to_dict(),
            vision_config=self.vision_config.to_dict(),
            ignore_index=self.ignore_index,
            image_token_index=self.image_token_index,
            projector_hidden_act=self.projector_hidden_act,
            vision_feature_select_strategy=self.vision_feature_select_strategy,
            vision_feature_layer=self.vision_feature_layer,
            eos_token_id=self.eos_token_id,
        )

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config.num_channels,
                self.vision_config.image_size,
                self.vision_config.image_size,
            ]
        )
        config = self.get_config()
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = input_ids.ne(1).to(torch_device)
        input_ids[input_ids == config.image_token_index] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = config.image_token_index

        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@slow
@require_torch
class AriaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `AriaForConditionalGeneration`.
    """

    all_model_classes = (AriaModel, AriaForConditionalGeneration) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_torchscript = False
    _is_composite = True

    def setUp(self):
        self.model_tester = AriaVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AriaConfig, has_text_modality=False)

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="Compile not yet supported in LLava models")
    @pytest.mark.torch_compile_test
    def test_sdpa_can_compile_dynamic(self):
        pass

    @unittest.skip(reason="Compile not yet supported in LLava models")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Feedforward chunking is not yet supported")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip(reason="Unstable test")
    def test_initialization(self):
        pass

    @unittest.skip(reason="Unstable test")
    def test_dola_decoding_sample(self):
        pass

    @unittest.skip(reason="Dynamic control flow due to MoE")
    def test_generate_with_static_cache(self):
        pass

    @unittest.skip(reason="Dynamic control flow due to MoE")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @unittest.skip(reason="Aria uses nn.MHA which is not compatible with offloading")
    def test_cpu_offload(self):
        pass

    @unittest.skip(reason="Aria uses nn.MHA which is not compatible with offloading")
    def test_disk_offload_bin(self):
        pass

    @unittest.skip(reason="Aria uses nn.MHA which is not compatible with offloading")
    def test_disk_offload_safetensors(self):
        pass


SKIP = False
torch_accelerator_module = getattr(torch, 
torch_device) memory = 23 # skip on T4 / A10 if hasattr(torch_accelerator_module, "get_device_properties"): if torch_accelerator_module.get_device_properties(0).total_memory / 1024**3 < memory: SKIP = True @unittest.skipIf(SKIP, reason="A10 doesn't have enough GPU memory for this tests") @require_torch class AriaForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("rhymes-ai/Aria") cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_torch_large_accelerator @require_bitsandbytes def test_small_model_integration_test(self): # Let's make sure we test the preprocessing to replace what is used model = AriaForConditionalGeneration.from_pretrained( "rhymes-ai/Aria", quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) prompt = "<|img|>\nUSER: What are the things I should be cautious about when I visit this place?\nASSISTANT:" raw_image = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw) inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device, model.dtype) non_img_tokens = [ 109, 3905, 2000, 93415, 4551, 1162, 901, 3894, 970, 2478, 1017, 19312, 2388, 1596, 1809, 970, 5449, 1235, 3333, 93483, 109, 61081, 11984, 14800, 93415 ] # fmt: skip EXPECTED_INPUT_IDS = torch.tensor([[9] * 256 + non_img_tokens]).to(inputs["input_ids"].device) self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS)) output = model.generate(**inputs, max_new_tokens=20) decoded_output = self.processor.decode(output[0], skip_special_tokens=True) expected_output = Expectations( { ( "cuda", None, ): "\nUSER: What are the things I should be cautious about when I visit this place?\nASSISTANT: When visiting this place, there are a few things one should be cautious about. Firstly,", ( "rocm", (9, 5), ): "\n USER: What are the things I should be cautious about when I visit this place?\n ASSISTANT: When you visit this place, you should be cautious about the following things:\n\n- The", } ).get_expectation() self.assertEqual(decoded_output, expected_output) @slow @require_torch_large_accelerator @require_bitsandbytes def test_small_model_integration_test_llama_single(self): # Let's make sure we test the preprocessing to replace what is used model_id = "rhymes-ai/Aria" model = AriaForConditionalGeneration.from_pretrained( model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) processor = AutoProcessor.from_pretrained(model_id) prompt = "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? ASSISTANT:" raw_image = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw) inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device, model.dtype) output = model.generate(**inputs, max_new_tokens=90, do_sample=False) EXPECTED_DECODED_TEXT = Expectations( { ("cuda", (8, 0)): "USER: \n What are the things I should be cautious about when I visit this place? ASSISTANT: When visiting this beautiful location, it's important to be mindful of a few things to ensure both your safety and the preservation of the environment. Firstly, always be cautious when walking on the wooden pier, as it can be slippery, especially during or after rain. Secondly, be aware of the local wildlife and do not feed or disturb them. 
Lastly, respect the natural surroundings by not littering and sticking to", ("rocm", (9, 5)): "USER: \n What are the things I should be cautious about when I visit this place? ASSISTANT: \n\nWhen visiting this place, you should be cautious about the following:\n\n1. **Weather Conditions**: The weather can be unpredictable, so it's important to check the forecast and dress in layers. Sudden changes in weather can occur, so be prepared for rain or cold temperatures.\n\n2. **Safety on the Dock**: The dock may be slippery, especially when", } ).get_expectation() # fmt: off decoded_output = processor.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual( decoded_output, EXPECTED_DECODED_TEXT, f"Expected: {repr(EXPECTED_DECODED_TEXT)}\nActual: {repr(decoded_output)}", ) @slow @require_torch_large_accelerator @require_bitsandbytes def test_small_model_integration_test_llama_batched(self): # Let's make sure we test the preprocessing to replace what is used model_id = "rhymes-ai/Aria" model = AriaForConditionalGeneration.from_pretrained( model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) processor = AutoProcessor.from_pretrained(model_id) prompts = [ "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT:", "USER: <|img|>\nWhat is this? ASSISTANT:", ] image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw) image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to( model.device, model.dtype ) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = Expectations( { ("cuda", None): [ "USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT: When visiting this place, which is a pier or dock extending over a body of water, you", "USER: \nWhat is this? ASSISTANT: The image features two cats lying down on a pink couch. One cat is located on", ], ("rocm", (9, 5)): [ "USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT: \n\nWhen visiting this place, you should be cautious about the weather conditions, as it", "USER: \n What is this? ASSISTANT: This is a picture of two cats sleeping on a couch. USER: What is the color of", ], } ).get_expectation() decoded_output = processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT) @slow @require_torch_large_accelerator @require_bitsandbytes def test_small_model_integration_test_batch(self): # Let's make sure we test the preprocessing to replace what is used model = AriaForConditionalGeneration.from_pretrained( "rhymes-ai/Aria", quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) # The first batch is longer in terms of text, but only has 1 image. The second batch will be padded in text, but the first will be padded because images take more space!. prompts = [ "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? 
What should I bring with me?\nASSISTANT:", "USER: <|img|>\nWhat is this?\nASSISTANT:", ] image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw) image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to( model.device, model.dtype ) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = Expectations({ ("cuda", None): [ 'USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT: When visiting this place, there are a few things to be cautious about and items to bring.', 'USER: \nWhat is this?\nASSISTANT: Cats', ], ("rocm", (9, 5)): [ 'USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me?\n ASSISTANT: \n\nWhen visiting this place, you should be cautious about the following:\n\n-', 'USER: \n What is this?\n ASSISTANT: This is a picture of two cats sleeping on a couch. The couch is red, and the cats', ], }).get_expectation() # fmt: skip decoded_output = self.processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT) @slow @require_torch_large_accelerator @require_bitsandbytes def test_small_model_integration_test_llama_batched_regression(self): # Let's make sure we test the preprocessing to replace what is used model_id = "rhymes-ai/Aria" # Multi-image & multi-prompt (e.g. 3 images and 2 prompts now fails with SDPA, this tests if "eager" works as before) model = AriaForConditionalGeneration.from_pretrained( model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) processor = AutoProcessor.from_pretrained(model_id, pad_token="<pad>") prompts = [ "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT:", "USER: <|img|>\nWhat is this?\nASSISTANT: Two cats lying on a bed!\nUSER: <|img|>\nAnd this?\nASSISTANT:", ] image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw) image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=[image1, image2, image1], text=prompts, return_tensors="pt", padding=True) inputs = inputs.to(model.device, model.dtype) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = Expectations({ ("cuda", None): ['USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT: When visiting this place, which appears to be a dock or pier extending over a body of water', 'USER: \nWhat is this?\nASSISTANT: Two cats lying on a bed!\nUSER: \nAnd this?\nASSISTANT: A cat sleeping on a bed.'], ("rocm", (9, 5)): ['USER: \n What are the things I should be cautious about when I visit this place? 
What should I bring with me?\n ASSISTANT: \n\nWhen visiting this place, you should be cautious about the weather conditions, as it', 'USER: \n What is this?\n ASSISTANT: Two cats lying on a bed!\n USER: \n And this?\n ASSISTANT: A serene lake scene with a wooden dock extending into the water.\n USER: \n'] }).get_expectation() # fmt: skip decoded_output = processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT) @slow @require_torch_large_accelerator @require_vision @require_bitsandbytes def test_batched_generation(self): # Skip multihead_attn for 4bit because MHA will read the original weight without dequantize. # See https://github.com/huggingface/transformers/pull/37444#discussion_r2045852538. model = AriaForConditionalGeneration.from_pretrained( "rhymes-ai/Aria", quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) processor = AutoProcessor.from_pretrained("rhymes-ai/Aria") prompt1 = "<image>\n<image>\nUSER: What's the difference of two images?\nASSISTANT:" prompt2 = "<image>\nUSER: Describe the image.\nASSISTANT:" prompt3 = "<image>\nUSER: Describe the image.\nASSISTANT:" url1 = "https://images.unsplash.com/photo-1552053831-71594a27632d?q=80&w=3062&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D" url2 = "https://images.unsplash.com/photo-1617258683320-61900b281ced?q=80&w=3087&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D" image1 = Image.open(requests.get(url1, stream=True).raw) image2 = Image.open(requests.get(url2, stream=True).raw) # Create inputs messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": prompt1}, {"type": "image"}, {"type": "text", "text": prompt2}, ], }, { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": prompt3}, ], }, ] prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages] images = [[image1, image2], [image2]] inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to( device=model.device, dtype=model.dtype ) EXPECTED_OUTPUTS = Expectations( { ("cpu", None): [ "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with", "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a grassy hill. The alpaca has", ], ("cuda", None): [ "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with", "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a patch of ground with some dry grass. 
The", ], ("xpu", 3): [ "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with", "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a patch of ground with some dry grass. The", ], ("rocm", (9, 5)): [ "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image shows a cute golden retriever puppy sitting on a paved surface with a stick", '<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young llama standing on a patch of ground with some dry grass and dirt. The' ], } ) # fmt: skip EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation() generate_ids = model.generate(**inputs, max_new_tokens=20) outputs = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual(outputs, EXPECTED_OUTPUT) def test_tokenizer_integration(self): model_id = "rhymes-ai/Aria" slow_tokenizer = AutoTokenizer.from_pretrained( model_id, bos_token="<|startoftext|>", eos_token="<|endoftext|>", use_fast=False ) slow_tokenizer.add_tokens("<image>", True) fast_tokenizer = AutoTokenizer.from_pretrained( model_id, bos_token="<|startoftext|>", eos_token="<|endoftext|>", from_slow=True, legacy=False, ) fast_tokenizer.add_tokens("<image>", True) prompt = "<|startoftext|><|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>" EXPECTED_OUTPUT = ['<|startoftext|>', '<', '|', 'im', '_', 'start', '|', '>', 'system', '\n', 'Answer', '▁the', '▁questions', '.<', '|', 'im', '_', 'end', '|', '><', '|', 'im', '_', 'start', '|', '>', 'user', '\n', '<image>', '\n', 'What', '▁is', '▁shown', '▁in', '▁this', '▁image', '?', '<', '|', 'im', '_', 'end', '|', '>'] # fmt: skip self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT) self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT) @slow @require_torch_large_accelerator @require_bitsandbytes def test_generation_no_images(self): model_id = "rhymes-ai/Aria" model = AriaForConditionalGeneration.from_pretrained( model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]), ) processor = AutoProcessor.from_pretrained(model_id) assert model.device.type == "cuda", "This test is only supported on CUDA" # TODO: remove this # Prepare inputs with no images inputs = processor(text="Hello, I am", return_tensors="pt").to(torch_device) # Make sure that `generate` works _ = model.generate(**inputs, max_new_tokens=20)
transformers/tests/models/aria/test_modeling_aria.py/0
{ "file_path": "transformers/tests/models/aria/test_modeling_aria.py", "repo_id": "transformers", "token_count": 11918 }
556
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch AyaVision model."""

import unittest

import pytest
from parameterized import parameterized

from transformers import (
    AutoProcessor,
    AyaVisionConfig,
    is_torch_available,
)
from transformers.testing_utils import (
    Expectations,
    cleanup,
    get_device_properties,
    require_deterministic_for_xpu,
    require_read_token,
    require_torch,
    require_torch_accelerator,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        AyaVisionForConditionalGeneration,
        AyaVisionModel,
    )


class AyaVisionVisionText2TextModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        vision_feature_layer=-1,
        downsample_factor=2,
        ignore_index=-100,
        bos_token_id=0,
        eos_token_id=0,
        pad_token_id=0,
        image_token_index=2,
        num_channels=3,
        image_size=64,
        model_type="aya_vision",
        is_training=True,
        text_config={
            "model_type": "cohere2",
            "vocab_size": 99,
            "hidden_size": 128,
            "intermediate_size": 37,
            "num_hidden_layers": 4,
            "num_attention_heads": 4,
            "output_channels": 64,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "tie_word_embeddings": True,
            "bos_token_id": 0,
            "eos_token_id": 0,
            "pad_token_id": 0,
        },
        vision_config={
            "model_type": "siglip_vision_model",
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 128,
            "image_size": 64,
            "patch_size": 8,
            "vision_use_head": False,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.image_token_index = image_token_index
        self.model_type = model_type
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.vision_feature_layer = vision_feature_layer
        self.downsample_factor = downsample_factor
        self.is_training = is_training
        self.num_channels = num_channels
        self.image_size = image_size
        self.image_seq_length = (image_size // (vision_config["patch_size"] * downsample_factor)) ** 2
        self.seq_length = seq_length + self.image_seq_length

        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]

    def get_config(self):
        return AyaVisionConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            model_type=self.model_type,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            image_token_index=self.image_token_index,
            vision_feature_layer=self.vision_feature_layer,
            downsample_factor=self.downsample_factor,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        pixel_values = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) # input_ids[:, -1] = self.pad_token_id input_ids[input_ids == self.image_token_index] = self.pad_token_id input_ids[:, : self.image_seq_length] = self.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class AyaVisionModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( AyaVisionModel, AyaVisionForConditionalGeneration, ) if is_torch_available() else () ) all_generative_model_classes = (AyaVisionForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "image-text-to-text": AyaVisionForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_torchscript = False test_head_masking = False _is_composite = True def setUp(self): self.model_tester = AyaVisionVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=AyaVisionConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip("Failing because of unique cache (HybridCache)") def test_model_outputs_equivalence(self, **kwargs): pass @unittest.skip("Cohere2's forcefully disables sdpa due to softcapping") def test_sdpa_can_dispatch_non_composite_models(self): pass @unittest.skip("Cohere2's eager attn/sdpa attn outputs are expected to be different") def test_eager_matches_sdpa_generate(self): pass @parameterized.expand([("random",), ("same",)]) @pytest.mark.generate @unittest.skip("Cohere2 has HybridCache which is not compatible with assisted decoding") def test_assisted_decoding_matches_greedy_search(self, assistant_type): pass @unittest.skip("Cohere2 has HybridCache which is not compatible with assisted decoding") def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type): pass @pytest.mark.generate @unittest.skip("Cohere2 has HybridCache which is not compatible with assisted decoding") def test_assisted_decoding_sample(self): pass @unittest.skip("Cohere2 has HybridCache which is not compatible with dola decoding") def test_dola_decoding_sample(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support continue from past kv") def test_generate_continue_from_past_key_values(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support low_memory generation") def test_beam_search_low_memory(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate_low_memory(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.") def test_generate_with_static_cache(self): pass @unittest.skip("Cohere2 has HybridCache and doesn't support StaticCache. 
Though it could, it shouldn't support.") def test_generate_from_inputs_embeds_with_static_cache(self): pass @unittest.skip("Failing because of unique cache (HybridCache)") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @unittest.skip(reason="Compile not yet supported because in LLava models") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass # todo: yoni - fix or improve the test @unittest.skip("Difference is slightly higher than the threshold") def test_batching_equivalence(self): pass @require_read_token @require_torch class AyaVisionIntegrationTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_checkpoint = "CohereForAI/aya-vision-8b" cls.model = None @classmethod def tearDownClass(cls): del cls.model cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @classmethod def get_model(cls): # Use 4-bit on T4 device_type, major, _ = get_device_properties() load_in_4bit = (device_type == "cuda") and (major < 8) dtype = None if load_in_4bit else torch.float16 if cls.model is None: cls.model = AyaVisionForConditionalGeneration.from_pretrained( cls.model_checkpoint, device_map=torch_device, dtype=dtype, load_in_4bit=load_in_4bit, ) return cls.model @slow @require_torch_accelerator def test_small_model_integration_forward(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() messages = [ { "role": "user", "content": [ {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"}, {"type": "text", "text": "Please describe the image explicitly."}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.float16) # Forward with torch.inference_mode(): output = model(**inputs) actual_logits = output.logits[0, -1, :5].cpu() EXPECTED_LOGITS = Expectations( { ("xpu", 3): [0.4109, 0.1532, 0.8018, 2.1328, 0.5483], # 4-bit ("cuda", 7): [0.1097, 0.3481, 3.8340, 9.7969, 2.0488], ("cuda", 8): [1.6396, 0.6094, 3.1992, 8.5234, 2.1875], } ) # fmt: skip expected_logits = torch.tensor(EXPECTED_LOGITS.get_expectation(), dtype=torch.float16) self.assertTrue( torch.allclose(actual_logits, expected_logits, atol=0.1), f"Actual logits: {actual_logits}" f"\nExpected logits: {expected_logits}" f"\nDifference: {torch.abs(actual_logits - expected_logits)}", ) @slow @require_torch_accelerator @require_deterministic_for_xpu def test_small_model_integration_generate_text_only(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() messages = [ { "role": "user", "content": [ {"type": "text", "text": "Write a haiku"}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" 
).to(torch_device, dtype=torch.float16) with torch.no_grad(): generate_ids = model.generate(**inputs, max_new_tokens=25, do_sample=False) decoded_output = processor.decode( generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True ) expected_outputs = Expectations( { ("xpu", 3): "Whispers on the breeze,\nLeaves dance under moonlit skies,\nNature's quiet song.", # 4-bit ("cuda", 7): "Sure, here's a haiku for you:\n\nMorning dew sparkles,\nPetals unfold in sunlight,\n", ("cuda", 8): "Whispers on the breeze,\nLeaves dance under moonlit skies,\nNature's quiet song.", } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual(decoded_output, expected_output) @slow @require_torch_accelerator @require_deterministic_for_xpu def test_small_model_integration_generate_chat_template(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() messages = [ { "role": "user", "content": [ {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"}, {"type": "text", "text": "Please describe the image explicitly."}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.float16) with torch.no_grad(): generate_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) decoded_output = processor.decode( generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True ) expected_outputs = Expectations( { ("xpu", 3): 'The image depicts a cozy scene of two cats resting on a bright pink blanket. The cats,', # 4-bit ("cuda", 7): 'The image depicts two cats comfortably resting on a pink blanket spread across a sofa. The cats,', ("cuda", 8): 'The image depicts a cozy scene of two cats resting on a bright pink blanket. 
The cats,', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual(decoded_output, expected_output) @slow @require_torch_accelerator def test_small_model_integration_batched_generate(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() # Prepare inputs messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, {"type": "text", "text": "Describe this image"}, ], }, ], ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device, dtype=torch.float16) output = model.generate(**inputs, do_sample=False, max_new_tokens=25) # Check first output decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "Wooden path to water,\nMountains echo in stillness,\nPeaceful forest lake.", # 4-bit ("cuda", 7): "Wooden bridge stretches\nMirrored lake below, mountains rise\nPeaceful, serene", ("cuda", 8): 'Wooden path to water,\nMountains echo in stillness,\nPeaceful forest scene.', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(output[1, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): 'This image captures a vibrant street scene in a bustling urban area, likely in an Asian city. The focal point is a', # 4-bit ("cuda", 7): 'This vibrant image captures a bustling street scene in a multicultural urban area, featuring a traditional Chinese gate adorned with intricate red and', ("cuda", 8): 'This image captures a vibrant street scene in a bustling urban area, likely in an Asian city. The focal point is a', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) @slow @require_torch_accelerator @require_deterministic_for_xpu def test_small_model_integration_batched_generate_multi_image(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() # Prepare inputs messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg", }, { "type": "image", "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg", }, { "type": "text", "text": "These images depict two different landmarks. 
Can you identify them?", }, ], }, ], ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device, dtype=torch.float16) output = model.generate(**inputs, do_sample=False, max_new_tokens=25) # Check first output decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True) # Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232 expected_outputs = Expectations( { ("xpu", 3): "Wooden path to water,\nMountains echo in stillness,\nPeaceful forest lake.", ("cuda", 7): 'Wooden bridge stretches\nMirrored lake below, mountains rise\nPeaceful, serene', ("cuda", 8): 'Wooden path to water,\nMountains echo in stillness,\nPeaceful forest scene.', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(output[1, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "The first image showcases the Statue of Liberty, a colossal neoclassical sculpture on Liberty Island in New York Harbor. Standing at ", ("cuda", 7): 'The first image showcases the Statue of Liberty, a monumental sculpture located on Liberty Island in New York Harbor. Standing atop a', ("cuda", 8): 'The first image showcases the Statue of Liberty, a colossal neoclassical sculpture on Liberty Island in New York Harbor. Standing at ', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", )
transformers/tests/models/aya_vision/test_modeling_aya_vision.py/0
{ "file_path": "transformers/tests/models/aya_vision/test_modeling_aya_vision.py", "repo_id": "transformers", "token_count": 10125 }
557
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch BEiT model.""" import unittest import pytest from datasets import load_dataset from transformers import BeitConfig from transformers.testing_utils import ( require_torch, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import ( cached_property, is_torch_available, is_vision_available, ) from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( BeitBackbone, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class BeitModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[1, 2, 3, 4], out_features=["stage1", "stage2", "stage3", "stage4"], attn_implementation="eager", mask_ratio=0.5, ): self.parent = parent self.vocab_size = vocab_size self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.out_features = out_features self.num_labels = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.mask_length = self.seq_length - 1 self.num_masks = int(mask_ratio * self.seq_length) self.attn_implementation = attn_implementation def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], 
self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, out_features=self.out_features, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = BeitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_backbone(self, config, pixel_values, labels, pixel_labels): model = BeitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) expected_height = expected_width = self.image_size // config.patch_size self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width] ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = BeitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width] ) # verify channels self.parent.assertEqual(len(model.channels), 1) def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels): model = BeitForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = BeitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = BeitForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = BeitForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, 
self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True def setUp(self): self.model_tester = BeitModelTester(self) self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds") def test_inputs_embeds(self): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="BEiT does not support feedforward chunking yet") def test_feed_forward_chunking(self): pass @unittest.skip(reason="BEiT can't compile dynamic") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class.__name__ in [ *MODEL_MAPPING_NAMES.values(), *MODEL_FOR_BACKBONE_MAPPING_NAMES.values(), "BeitForMaskedImageModeling", ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) 
loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class.__name__ in [ *MODEL_MAPPING_NAMES.values(), *MODEL_FOR_BACKBONE_MAPPING_NAMES.values(), "BeitForMaskedImageModeling", ] or not model_class.supports_gradient_checkpointing ): continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): model_name = "microsoft/beit-base-patch16-224" model = BeitModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class BeitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None @slow def test_inference_masked_image_modeling_head(self): model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device) image_processor = self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device) # prepare bool_masked_pos bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 196, 8192)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device) torch.testing.assert_close(logits[bool_masked_pos][:3, :3], expected_slice, rtol=1e-2, atol=1e-2) @slow def test_inference_image_classification_head_imagenet_1k(self): model = 
BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device) torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) expected_class_idx = 281 self.assertEqual(logits.argmax(-1).item(), expected_class_idx) @slow def test_inference_image_classification_head_imagenet_22k(self): model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21841)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device) torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) expected_class_idx = 2396 self.assertEqual(logits.argmax(-1).item(), expected_class_idx) @slow def test_inference_semantic_segmentation(self): model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") model = model.to(torch_device) image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False) ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = ds[0]["image"].convert("RGB") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 150, 160, 160)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8479, -0.9836, -1.7418], [-2.9449, -1.3333, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ], device=torch_device, ) torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_post_processing_semantic_segmentation(self): model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") model = model.to(torch_device) image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False) ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = ds[0]["image"].convert("RGB") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((160, 160)) self.assertEqual(segmentation[0].shape, expected_shape) @slow def test_inference_interpolate_pos_encoding(self): model_name = 
"microsoft/beit-base-patch16-224-pt22k" model = BeitModel.from_pretrained(model_name, **{"use_absolute_position_embeddings": True}).to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") processor = BeitImageProcessor.from_pretrained(model_name) inputs = processor(images=image, return_tensors="pt", size={"height": 480, "width": 480}) pixel_values = inputs.pixel_values.to(torch_device) # with interpolate_pos_encoding being True the model should process the higher resolution image # successfully and produce the expected output. with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # num_cls_tokens + (height / patch_size) * (width / patch_size) # 1 + (480 / 16) * (480 / 16) = 1 + 30 * 30 = 901 expected_shape = torch.Size((1, 901, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) @require_torch class BeitBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (BeitBackbone,) if is_torch_available() else () config_class = BeitConfig def setUp(self): self.model_tester = BeitModelTester(self)
transformers/tests/models/beit/test_modeling_beit.py/0
{ "file_path": "transformers/tests/models/beit/test_modeling_beit.py", "repo_id": "transformers", "token_count": 9885 }
558
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Blip model.""" import inspect import os import tempfile import unittest import numpy as np import requests from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipTextModel, BlipVisionModel, ) if is_vision_available(): from PIL import Image from transformers import BlipProcessor class BlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return BlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = BlipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = 
(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BlipVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Blip does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (BlipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = BlipVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip-vqa-base" model = BlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class BlipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = 
num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def create_and_check_model(self, config, input_ids, input_mask): model = BlipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BlipTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = BlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip-vqa-base" model = BlipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class BlipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, 
is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), projection_dim=64, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlipModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BlipModel, "image-to-text": BlipForConditionalGeneration, "visual-question-answering": BlipForQuestionAnswering, "image-text-to-text": BlipForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False def setUp(self): self.model_tester = BlipModelTester(self) common_properties = ["logit_scale_init_value", "image_text_hidden_size", "projection_dim", "label_smoothing"] self.config_tester = ConfigTester( self, config_class=BlipConfig, has_text_modality=False, common_properties=common_properties ) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initialization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): 
if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: # See PR #38607 (to avoid flakiness) data = torch.flatten(param.data) n_elements = torch.numel(data) # skip 2.5% of elements on each side to avoid issues caused by `nn.init.trunc_normal_` described in # https://github.com/huggingface/transformers/pull/27906#issuecomment-1846951332 n_elements_to_skip_on_each_side = int(n_elements * 0.025) data_to_check = torch.sort(data).values if n_elements_to_skip_on_each_side > 0: data_to_check = data_to_check[ n_elements_to_skip_on_each_side:-n_elements_to_skip_on_each_side ] self.assertIn( ((data_to_check.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Blip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = 
BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip-vqa-base" model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_get_image_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["input_ids", "attention_mask", "return_loss"] for key in keys_to_pop: inputs_dict.pop(key) model = BlipModel(config).to(torch_device) model.eval() image_features = model.get_image_features(**inputs_dict) self.assertEqual( image_features.shape, ( self.model_tester.batch_size, model.projection_dim, ), ) def test_get_text_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["pixel_values", "return_loss"] for key in keys_to_pop: inputs_dict.pop(key) model = BlipModel(config).to(torch_device) model.eval() text_features = model.get_text_features(**inputs_dict) self.assertEqual( text_features.shape, ( self.model_tester.batch_size, model.projection_dim, ), ) def test_get_multimodal_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["return_loss"] for key in keys_to_pop: inputs_dict.pop(key) model = BlipModel(config).to(torch_device) model.eval() multimodal_features = model.get_multimodal_features(**inputs_dict) self.assertEqual( multimodal_features.shape, ( self.model_tester.batch_size, model.projection_dim, ), ) class BlipTextRetrievalModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), projection_dim=64, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipTextImageModelsModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = 
BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.seq_length = self.text_model_tester.seq_length # need seq_length for pt-tf equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), projection_dim=64, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipVQAModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), projection_dim=64, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "decoder_input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "labels": input_ids, } return config, inputs_dict @require_torch @require_vision class BlipVQAModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForQuestionAnswering,) if is_torch_available() else () # 
Doesn't run generation tests due to custom generation logic -- won't fix all_generative_model_classes = () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipVQAModelTester(self) def _prepare_inputs_for_vqa(self): _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict.pop("return_loss") return inputs_dict def test_class_name_consistency(self): """ Tests that all VQA models have a class name that ends with "ForQuestionAnswering" """ for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) self.assertTrue( model.__class__.__name__.endswith("ForQuestionAnswering"), f"Class name should end with 'ForVisualQuestionAnswering' got {model.__class__.__name__}", ) def test_training(self): """ Tests that all VQA models can be trained on a single batch """ for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()).to(torch_device) model.train() loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1]).loss loss.backward() # verify the gradients are not None for name, param in model.named_parameters(): self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}") def test_forward_signature(self): """ Test if the forward function has the expected arguments. """ for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so args are the first n entries args = list(signature.parameters.keys()) expected_args = [ "input_ids", "attention_mask", "labels", "decoder_input_ids", "decoder_attention_mask", ] for arg in expected_args: self.assertTrue( arg in args, f"Argument {arg} of forward function signature should include {arg}. 
Found {args}.", ) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @require_torch class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForImageTextRetrieval,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipTextRetrievalModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not setup for training") for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not setup for training") for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecture 
seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # override as the `logit_scale` parameter initialization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Blip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = 
BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip-vqa-base" model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForConditionalGeneration,) if is_torch_available() else () # Doesn't run generation tests due to custom generation logic -- wont fix all_generative_model_classes = () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipTextImageModelsModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not setup for training") for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not setup for training") for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) 
model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() # override as the `logit_scale` parameter initialization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Blip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load 
BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip-vqa-base" model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch @slow class BlipModelIntegrationTest(unittest.TestCase): def test_inference_image_captioning(self): model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() # image only inputs = processor(images=image, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) # Test output self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]) # image and context context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) # Test output self.assertEqual( predictions[0].tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) @require_torch_accelerator @require_torch_fp16 def test_inference_image_captioning_fp16(self): model = BlipForConditionalGeneration.from_pretrained( "Salesforce/blip-image-captioning-base", dtype=torch.float16 ).to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() # image only inputs = processor(images=image, return_tensors="pt").to(torch_device, torch.float16) predictions = model.generate(**inputs) # Test output self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]) # image and context context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device, torch.float16) predictions = model.generate(**inputs) # Test output self.assertEqual( predictions[0].tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) def test_inference_interpolate_pos_encoding(self): model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") processor.image_processor.size = {"height": 500, "width": 500} image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs, interpolate_pos_encoding=True) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 1037, 3899, 102]) self.assertEqual(generated_text, "a woman sitting on the beach with a dog") def test_inference_vqa(self): model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") image = prepare_img() text = "how many dogs are in the picture?" 
inputs = processor(image, text=text, return_tensors="pt").to(torch_device) out = model.generate(**inputs) # Test output self.assertEqual(out[0].tolist(), [30522, 1015, 102]) def test_inference_itm(self): model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco") image = prepare_img() text = "A woman and her dog sitting in a beach" inputs = processor(image, text, return_tensors="pt").to(torch_device) out_itm = model(**inputs) out = model(**inputs, use_itm_head=False) expected_scores = torch.Tensor([[0.0029, 0.9971]]) torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(out[0].cpu(), torch.Tensor([[0.5162]]), rtol=1e-3, atol=1e-3)
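# --- Hedged sketch (not part of the original test file): the logit_scale
# initialization value asserted in the test_initialization overrides above.
# BLIP follows CLIP and initializes the contrastive temperature parameter to
# ln(1 / 0.07); the tests compare against np.log(1 / 0.07) with delta=1e-3.
import math

expected_logit_scale = math.log(1 / 0.07)
print(f"expected logit_scale init: {expected_logit_scale:.4f}")  # ~2.6593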
transformers/tests/models/blip/test_modeling_blip.py/0
{ "file_path": "transformers/tests/models/blip/test_modeling_blip.py", "repo_id": "transformers", "token_count": 25850 }
559
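The captioning assertions above pin raw BERT-vocabulary token ids, which are opaque when a test fails. A minimal sketch of how to decode them back to text with the same processor (assumes Hub access for `Salesforce/blip-image-captioning-base`; the decoded string is inferred from the `test_inference_interpolate_pos_encoding` assertion above, so treat it as a hedged expectation, not a guarantee):

```python
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

# The expected ids from test_inference_image_captioning, decoded back to text.
expected_ids = [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
# skip_special_tokens drops the leading decoder token (30522) and [SEP] (102).
print(processor.decode(expected_ids, skip_special_tokens=True))
# expected to read: "a woman sitting on the beach with her dog"
```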
# Copyright 2020 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from functools import lru_cache from transformers import AddedToken, BatchEncoding, ByT5Tokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = ByT5Tokenizer test_rust_tokenizer = False @classmethod def setUpClass(cls): super().setUpClass() tokenizer = ByT5Tokenizer() tokenizer.save_pretrained(cls.tmpdirname) @cached_property def t5_base_tokenizer(self): return ByT5Tokenizer.from_pretrained("google/byt5-small") @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, pretrained_name=None, **kwargs) -> ByT5Tokenizer: pretrained_name = pretrained_name or cls.tmpdirname return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_multibytes_char(self): tokenizer = self.t5_base_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "Unicode €.</s>") encoded = tokenizer("e è é ê ë") encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "e è é ê ë</s>") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>") def test_prepare_batch_integration(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 37), batch.input_ids.shape) self.assertEqual((2, 37), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] # fmt: skip expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: skip batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) # cannot use default save_and_load_tokenizer test method because tokenizer has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with 
open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_single_bytes(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) tokenizer = tokenizer_class.from_pretrained(tmp_dir) self.assertTrue(tokenizer.decode([255]) == "") @unittest.skip(reason="ByT5Tokenizer does not have a vocabulary") def test_get_vocab(self): pass @unittest.skip(reason="inputs cannot be pretokenized as ids depend on whole input string") def test_pretokenized_inputs(self): pass @unittest.skip(reason="ByT5Tokenizer does not have a vocabulary") def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str) # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because this tokenizer # doesn't have a vocab def test_tokenizers_common_ids_setters(self): tokenizers = self.get_tokenizers() for tokenizer 
in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] token_id_to_test_setters = 0 token_to_test_setters = tokenizer.convert_ids_to_tokens( token_id_to_test_setters, skip_special_tokens=False ) for attr in attributes_list: setattr(tokenizer, attr + "_id", None) self.assertEqual(getattr(tokenizer, attr), None) self.assertEqual(getattr(tokenizer, attr + "_id"), None) setattr(tokenizer, attr + "_id", token_id_to_test_setters) self.assertEqual(getattr(tokenizer, attr), token_to_test_setters) self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters) setattr(tokenizer, "additional_special_tokens_ids", []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), []) setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
transformers/tests/models/byt5/test_tokenization_byt5.py/0
{ "file_path": "transformers/tests/models/byt5/test_tokenization_byt5.py", "repo_id": "transformers", "token_count": 7934 }
560
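The hard-coded id lists in `test_multibytes_char` and `test_prepare_batch_integration` follow ByT5's byte-level scheme: every UTF-8 byte maps to `byte + 3`, reserving ids 0/1/2 for `<pad>`/`</s>`/`<unk>`. A minimal sketch reproducing the fixtures from that arithmetic (the offset of 3 is inferred from the fixtures themselves, e.g. "U" == 85 -> 88):

```python
def byt5_ids(text: str, add_eos: bool = True) -> list[int]:
    """Map text to ByT5-style ids: each UTF-8 byte shifted past 3 reserved ids."""
    ids = [b + 3 for b in text.encode("utf-8")]
    return ids + [1] if add_eos else ids  # 1 is the </s> id

# Matches the fixtures above, including the three-byte UTF-8 encoding of "€".
assert byt5_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
assert byt5_ids("e è é ê ë") == [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
```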
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import random import unittest import numpy as np from datasets import load_dataset from transformers import ClapFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.trainer_utils import set_seed from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester with Whisper->Clap class ClapFeatureExtractionTester: def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize self.feature_size = feature_size self.chunk_length = chunk_length self.hop_length = hop_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class ClapFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ClapFeatureExtractor # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.setUp with Whisper->Clap def setUp(self): self.feat_extract_tester = ClapFeatureExtractionTester(self) def test_call(self): # Tests that all call wrap 
to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 4) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest._load_datasamples def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration_fusion_short_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ # "repeat" [ -20.1049, -19.9764, -20.0731, -19.5055, -27.5018, -22.5761, -26.6071, -29.0091, -26.4659, -26.4236, -28.8808, -31.9190, -32.4848, -34.1186, -34.0340, -32.8803, -30.9895, -37.6238, -38.0347, -40.6263, -36.3496, -42.2533, -32.9132, -27.7068, -29.3704, -30.3208, -22.5972, -27.1494, -30.1975, -31.1005, -29.9372, -27.1917, -25.9806, -30.3489, -33.2380, -31.9062, -36.5498, -32.8721, -30.5629, -27.4674, -22.2232, -22.5653, -16.3868, -17.2713, -25.9738, -30.6256, -34.3766, -31.1292, -27.8950, -27.0588, -25.6206, -23.0712, -26.6050, -28.0112, -32.6847, -34.3396, -34.9738, -35.8463, -39.2324, -37.1188, -33.3705, -28.9230, -28.9112, -28.6578 ], [ -36.7233, -30.0587, -24.8431, -18.4611, 
-16.8149, -23.9319, -32.8580, -34.2264, -27.4332, -26.8027, -29.2721, -33.9033, -39.3403, -35.3232, -26.8076, -28.6460, -35.2780, -36.0738, -35.4996, -37.7631, -39.5056, -34.7112, -36.8741, -34.1066, -32.9474, -33.6604, -27.9937, -30.9594, -26.2928, -32.0485, -29.2151, -29.2917, -32.7308, -29.6542, -31.1454, -37.0088, -32.3388, -37.3086, -31.1024, -27.2889, -19.6788, -21.1488, -19.5144, -14.8889, -21.2006, -24.7488, -27.7940, -31.1058, -27.5068, -21.5737, -22.3780, -21.5151, -26.3086, -30.9223, -33.5043, -32.0307, -37.3806, -41.6188, -45.6650, -40.5131, -32.5023, -26.7385, -26.3709, -26.7761 ] ], [ # "repeatpad" [ -25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456, -33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575, -40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368, -36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010, -34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377, -38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952, -34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451, -34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136, -40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035, -31.6112 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ # None, same as "repeatpad" [ -25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456, -33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575, -40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368, -36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010, -34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377, -38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952, -34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451, -34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136, -40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035, -31.6112 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ], [ # "pad" [ -58.5260, -58.1155, -57.8623, -57.5059, -57.9178, -58.7171, -59.2343, -59.9833, -60.9764, -62.0722, -63.5723, -65.7111, -67.5153, -68.7088, -69.8325, -70.2987, -70.1548, -70.6233, -71.5702, -72.5159, -72.3821, -70.1817, -67.0315, -64.1387, -62.2202, -61.0717, -60.4951, -61.6005, -63.7358, -67.1400, -67.6185, -65.5635, -64.3593, -63.7138, -63.6209, -66.4950, -72.6284, -63.3961, -56.8334, -52.7319, -50.6310, -51.3728, -53.5619, -51.9190, -50.9708, -52.8684, -55.8073, -58.8227, -60.6991, -57.0547, -52.7611, -51.4388, -54.4892, -60.8950, -66.1024, -72.4352, -67.8538, -65.1463, -68.7588, -72.3080, -68.4864, -60.4688, -57.1516, -60.9460 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ] ] ) # fmt: on MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]] input_speech = self._load_datasamples(1) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, idx_in_mel in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN ): input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features self.assertEqual(input_features.shape, (1, 4, 1001, 64)) torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4) torch.testing.assert_close(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], rtol=1e-4, atol=1e-4) self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 1])) self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 2])) self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 3])) def test_integration_rand_trunc_short_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ # "repeat" [ -35.0483, -35.7865, -38.2884, -40.0220, -42.5349, -44.9489, -43.2228, -44.6499, -47.6253, -49.6983, -50.2127, -52.5483, -52.2223, -51.9157, -49.4082, -51.2024, -57.0476, -56.2803, -58.1618, -60.7474, -55.0389, -60.9514, -59.3080, -50.4419, -47.8172, -48.7570, -55.2552, -44.5036, -44.1148, -50.8218, -51.0968, -52.9408, -51.1037, -48.9789, -47.5897, -52.0915, -55.4216, -54.1529, -58.0149, -58.0866, -52.7798, -52.6154, -45.9144, -46.2008, -40.7603, -41.1703, -50.2250, -55.4112, -59.4818, -54.5795, -53.5552, -51.3668, -49.8358, -50.3186, -54.0452, -57.6030, -61.1589, -61.6415, -63.2756, -66.5890, -62.8543, -58.0665, -56.7203, -56.7632 ], [ -47.1320, -37.9961, -34.0076, -36.7109, -47.9057, -48.4924, -43.8371, -44.9728, -48.1689, -52.9141, -57.6077, -52.8520, -44.8502, -45.6764, -51.8389, -56.4284, -54.6972, -53.4889, -55.6077, -58.7149, -60.3760, -54.0136, -56.0730, -55.9870, -54.4017, -53.1094, -53.5640, -50.3064, -49.9520, -49.3239, -48.1668, -53.4852, -50.4561, -50.8688, -55.1970, -51.5538, -53.0260, -59.6933, -54.8183, -59.5895, -55.9589, -50.3761, -44.1282, -44.1463, -43.8540, -39.1168, -45.3893, -49.5542, -53.1505, -55.2870, -50.3921, -46.8511, -47.4444, -49.5633, -56.0034, -59.0815, -59.0018, -63.7589, -69.5745, -71.5789, -64.0498, -56.0558, -54.3475, -54.7004 ] ], [ # "repeatpad" [ -40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297, -60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865, 
-54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941, -56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141, -50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247, -62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084, -49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358, -60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610, -66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972, -59.3139 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ # None, same as "repeatpad" [ -40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297, -60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865, -54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941, -56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141, -50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247, -62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084, -49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358, -60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610, -66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972, -59.3139 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ # "pad" [ -73.3190, -73.6349, -74.1451, -74.8539, -75.7476, -76.5438, -78.5540, -80.1339, -81.8911, -83.7560, -85.5387, -86.7466, -88.2072, -88.6090, -88.8243, -89.0784, -89.4364, -89.8179, -91.3146, -92.2833, -91.7221, -90.9440, -88.1315, -86.2425, -84.2281, -82.4893, -81.5993, -81.1328, -81.5759, -83.1068, -85.6525, -88.9520, -88.9187, -87.2703, -86.3052, -85.7188, -85.8802, -87.9996, -95.0464, -88.0133, -80.8561, -76.5597, -74.2816, -74.8109, -77.3615, -76.0719, -75.3426, -77.6428, -80.9663, -84.5275, -84.9907, -80.5205, -77.2851, -78.6259, -84.7740, -91.4535, -98.1894, -94.3872, -92.3735, -97.6807, -98.1501, -91.4344, -85.2842, -88.4338 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ] ] ) # fmt: on MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]] input_speech = self._load_datasamples(1) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, idx_in_mel in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN ): input_features = feature_extractor( input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding ).input_features self.assertEqual(input_features.shape, (1, 1, 1001, 64)) torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4) torch.testing.assert_close(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], rtol=1e-4, atol=1e-4) def test_integration_fusion_long_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ -11.1830, -10.1894, -8.6051, -4.8578, -1.3268, -8.4606, -14.5453, -9.2017, 0.5781, 16.2129, 14.8289, 3.6326, -3.8794, -6.5544, -2.4408, 1.9531, 6.0967, 1.7590, -7.6730, -6.1571, 2.0052, 16.6694, 20.6447, 21.2145, 13.4972, 15.9043, 16.8987, 4.1766, 11.9428, 21.2372, 12.3016, 4.8604, 6.7241, 1.8543, 4.9235, 5.3188, -0.9897, -1.2416, -6.5864, 2.9529, 2.9274, 6.4753, 10.2300, 11.2127, 3.4042, -1.0055, -6.0475, -6.7524, -3.9801, -1.4434, 0.4740, -0.1584, -4.5457, -8.5746, -8.8428, -13.1475, -9.6079, -8.5798, -4.1143, -3.7966, -7.1651, -6.1517, -8.0258, -12.1486 ], [ -10.2017, -7.9924, -5.9517, -3.9372, -1.9735, -4.3130, 16.1647, 25.0592, 23.5532, 14.4974, -7.0778, -10.2262, 6.4782, 20.3454, 19.4269, 1.7976, -16.5070, 4.9380, 12.3390, 6.9285, -13.6325, -8.5298, 1.0839, -5.9629, -8.4812, 3.1331, -2.0963, -16.6046, -14.0070, -17.5707, -13.2080, -17.2168, -17.7770, -12.1111, -18.6184, -17.1897, -13.9801, -12.0426, -23.5400, -25.6823, -23.5813, -18.7847, -20.5473, -25.6458, -19.7585, -27.6007, -28.9276, -24.8948, -25.4458, -22.2807, -19.6613, -19.2669, -15.7813, -19.6821, -24.3439, -22.2598, -28.2631, -30.1017, -32.7646, -33.6525, -27.5639, -22.0548, -27.8054, -29.6947 ], [ -9.2078, -7.2963, -6.2095, -7.9959, -2.9280, -11.1843, -6.1490, 5.0733, 19.2957, 21.4578, 14.6803, -3.3153, -6.3334, -2.3542, 6.9509, 15.2965, 14.6620, 5.2075, -0.0873, 1.1919, 18.1986, 20.8470, 10.8035, 2.2516, 7.6905, 7.7427, -1.2543, -5.0018, 0.9809, -2.1584, -5.4580, -5.4760, -11.8888, -9.0605, -8.4638, -9.9897, -0.0540, -5.1629, 0.0483, -4.1504, -4.8140, -7.8236, -9.0622, -10.1742, -8.9597, -11.5380, -16.5603, -17.1858, -17.5032, -20.9326, -23.9543, -25.2602, -25.3429, -27.4536, -26.8859, -22.7852, -25.8288, -24.8399, -23.8893, -24.2096, -26.5415, -23.7281, -25.6851, -22.3629 ], [ 1.3448, 2.9883, 4.0366, -0.8019, -10.4191, -10.0883, -4.3812, 0.8136, 2.1579, 0.0832, 1.0949, -0.9759, -5.5319, -4.6009, -6.5452, -14.9155, -20.1584, -9.3611, -2.4271, 1.4031, 4.9910, 8.6916, 8.6785, 10.1973, 9.9029, 5.3840, 7.5336, 5.2803, 2.8144, -0.3138, 2.2216, 5.7328, 7.5574, 7.7402, 1.0681, 3.1049, 7.0742, 6.5588, 7.3712, 5.7881, 8.6874, 8.7725, 2.8133, -4.5809, -6.1317, -5.1719, -5.0192, -9.0977, -10.9391, -6.0769, 1.6016, -0.8965, -7.2252, -7.8632, -11.4468, -11.7446, -10.7447, -7.0601, -2.7748, -4.1798, -2.8433, -3.1352, 0.8097, 6.4212 ] ] ) # fmt: on MEL_BIN = 963 input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, block_idx in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, [1, 2, 0, 3] ): set_seed(987654321) input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features self.assertEqual(input_features.shape, (1, 
4, 1001, 64)) torch.testing.assert_close(input_features[0, block_idx, MEL_BIN], EXPECTED_VALUES, rtol=1e-3, atol=1e-3) def test_integration_rand_trunc_long_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ -35.4022, -32.7555, -31.2004, -32.7764, -42.5770, -41.6339, -43.1630, -44.5080, -44.3029, -48.9628, -39.5022, -39.2105, -43.1350, -43.2195, -48.4894, -52.2344, -57.6891, -52.2228, -45.5155, -44.2893, -43.4697, -46.6702, -43.7490, -40.4819, -42.7275, -46.3434, -46.8412, -41.2003, -43.1681, -46.2948, -46.1925, -47.8333, -45.6812, -44.9182, -41.7786, -43.3809, -44.3199, -42.8814, -45.4771, -46.7114, -46.9746, -42.7090, -41.6057, -38.3965, -40.1980, -41.0263, -34.1256, -28.3289, -29.0201, -30.4453, -29.5561, -30.1734, -25.9406, -19.0897, -15.8452, -20.1351, -23.6515, -23.1194, -17.1845, -19.4399, -23.6527, -22.8768, -20.7279, -22.7864 ], [ -35.7719, -27.2566, -23.6964, -27.5521, 0.2510, 7.4391, 1.3917, -13.3417, -28.1758, -17.0856, -5.7723, -0.8000, -7.8832, -15.5548, -30.5935, -24.7571, -13.7009, -10.3432, -21.2464, -24.8118, -19.4080, -14.9779, -11.7991, -18.4485, -20.1982, -17.3652, -20.6328, -28.2967, -25.7819, -21.8962, -28.5083, -29.5719, -30.2120, -35.7033, -31.8218, -34.0408, -37.7744, -33.9653, -31.3009, -30.9063, -28.6153, -32.2202, -28.5456, -28.8579, -32.5170, -37.9152, -43.0052, -46.4849, -44.0786, -39.1933, -33.2757, -31.6313, -42.6386, -52.3679, -53.5785, -55.6444, -47.0050, -47.6459, -56.6361, -60.6781, -61.5244, -55.8272, -60.4832, -58.1897 ], [ -38.2686, -36.6285, -32.5835, -35.1693, -37.7938, -37.4035, -35.3132, -35.6083, -36.3609, -40.9472, -36.7846, -36.1544, -38.9076, -39.3618, -35.4953, -34.2809, -39.9466, -39.7433, -34.8347, -37.5674, -41.5689, -38.9161, -34.3947, -30.2924, -30.4841, -34.5831, -28.9261, -24.8849, -31.2324, -27.1622, -27.2107, -25.9385, -30.1691, -30.9223, -23.9495, -25.6047, -26.7119, -28.5523, -27.7481, -32.8427, -35.4650, -31.0399, -31.2073, -30.5163, -22.9819, -20.8892, -19.2510, -24.7905, -28.9426, -28.1998, -26.7386, -25.0140, -27.9223, -32.9913, -33.1864, -34.9742, -38.5995, -39.6990, -29.3203, -22.4697, -25.6415, -33.5608, -33.0945, -27.1716 ], [ -33.2015, -28.7741, -21.9457, -23.4888, -32.1072, -8.6307, 3.2724, 5.9157, -0.9221, -30.1814, -31.0015, -27.4508, -27.0477, -9.5342, 0.3221, 0.6511, -7.1596, -25.9707, -32.8924, -32.2300, -13.8974, -0.4895, 0.9168, -10.7663, -27.1176, -35.0829, -11.6859, -4.8855, -11.8898, -26.6167, -5.6192, -3.8443, -19.7947, -14.4101, -8.6236, -21.2458, -21.0801, -17.9136, -24.4663, -18.6333, -24.8085, -15.5854, -15.4344, -11.5046, -22.3625, -27.3387, -32.4353, -30.9670, -31.3789, -35.4044, -34.4591, -25.2433, -28.0773, -33.8736, -33.0224, -33.3155, -38.5302, -39.2741, -36.6395, -34.7729, -32.4483, -42.4001, -49.2857, -39.1682 ] ] ) # fmt: on MEL_BIN = 963 SEEDS = [987654321, 1234, 666, 5555] input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, seed in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, SEEDS ): set_seed(seed) input_features = feature_extractor( input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding ).input_features self.assertEqual(input_features.shape, (1, 1, 1001, 64)) torch.testing.assert_close(input_features[0, 0, MEL_BIN], EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
transformers/tests/models/clap/test_feature_extraction_clap.py/0
{ "file_path": "transformers/tests/models/clap/test_feature_extraction_clap.py", "repo_id": "transformers", "token_count": 19256 }
561
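The integration tests above assert feature shapes of `(1, 4, 1001, 64)` for fusion and `(1, 1, 1001, 64)` for rand_trunc. A rough sketch of where those numbers come from, assuming `ClapFeatureExtractor`'s defaults of 48 kHz audio, `hop_length=480`, 10-second mel chunks, and 64 mel bins (the defaults are an assumption here; the shapes themselves are pinned by the tests):

```python
sampling_rate = 48_000
hop_length = 480
chunk_seconds = 10

# One STFT frame per hop, plus one frame for the final window boundary.
n_frames = chunk_seconds * sampling_rate // hop_length + 1  # 1001
n_mel_bins = 64

# Under the default "fusion" truncation the extractor stacks three mel crops
# plus one globally downsampled view, hence the leading 4; "rand_trunc" keeps
# a single view.
print((1, 4, n_frames, n_mel_bins))  # (1, 4, 1001, 64)
```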
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the ColPali processor.""" import shutil import tempfile import unittest import torch from transformers import GemmaTokenizer from transformers.models.colpali.processing_colpali import ColPaliProcessor from transformers.testing_utils import get_tests_dir, require_torch, require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import ( ColPaliProcessor, PaliGemmaProcessor, SiglipImageProcessor, ) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_vision class ColPaliProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = ColPaliProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384") image_processor.image_seq_length = 0 tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True) processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(cls.tmpdirname) @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) @require_torch @require_vision def test_process_images(self): # Processor configuration image_input = self.prepare_image_inputs() image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length") image_processor.image_seq_length = 14 # Get the processor processor = self.processor_class( tokenizer=tokenizer, image_processor=image_processor, ) # Process the image batch_feature = processor.process_images(images=image_input, return_tensors="pt") # Assertions self.assertIn("pixel_values", batch_feature) self.assertEqual(batch_feature["pixel_values"].shape, torch.Size([1, 3, 384, 384])) @require_torch @require_vision def test_process_queries(self): # Inputs queries = [ "Is attention really all you need?", "Are Benjamin, Antoine, Merve, and Jo best friends?", ] # Processor configuration image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length") image_processor.image_seq_length = 14 # Get the processor processor = self.processor_class( tokenizer=tokenizer, image_processor=image_processor, ) # Process the image batch_feature = processor.process_queries(text=queries, return_tensors="pt") # 
Assertions self.assertIn("input_ids", batch_feature) self.assertIsInstance(batch_feature["input_ids"], torch.Tensor) self.assertEqual(batch_feature["input_ids"].shape[0], len(queries)) # The following tests override the parent tests because ColPaliProcessor can only take one of images or text as input at a time. def test_tokenizer_defaults_preserved_by_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() inputs = processor(text=input_str, return_tensors="pt") self.assertEqual(inputs[self.text_input_name].shape[-1], 117) def test_image_processor_defaults_preserved_by_image_kwargs(self): """ We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied. """ if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["image_processor"] = self.get_component( "image_processor", do_rescale=True, rescale_factor=-1 ) processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) image_input = self.prepare_image_inputs() inputs = processor(images=image_input, return_tensors="pt") self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0) def test_kwargs_overrides_default_tokenizer_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding="longest") processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() inputs = processor(text=input_str, return_tensors="pt", max_length=112, padding="max_length") self.assertEqual(inputs[self.text_input_name].shape[-1], 112) def test_kwargs_overrides_default_image_processor_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["image_processor"] = self.get_component( "image_processor", do_rescale=True, rescale_factor=1 ) processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) image_input = self.prepare_image_inputs() inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt") self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0) def test_unstructured_kwargs(self): if "image_processor" not in 
self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() inputs = processor( text=input_str, return_tensors="pt", do_rescale=True, rescale_factor=-1, padding="max_length", max_length=76, ) self.assertEqual(inputs[self.text_input_name].shape[-1], 76) def test_unstructured_kwargs_batched(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) image_input = self.prepare_image_inputs(batch_size=2) inputs = processor( images=image_input, return_tensors="pt", do_rescale=True, rescale_factor=-1, padding="longest", max_length=76, ) self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0) def test_doubly_passed_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) image_input = self.prepare_image_inputs() with self.assertRaises(ValueError): _ = processor( images=image_input, images_kwargs={"do_rescale": True, "rescale_factor": -1}, do_rescale=True, return_tensors="pt", ) def test_structured_kwargs_nested(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } inputs = processor(text=input_str, **all_kwargs) self.skip_processor_without_typed_kwargs(processor) self.assertEqual(inputs[self.text_input_name].shape[-1], 76) def test_structured_kwargs_nested_from_dict(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor = self.processor_class(**processor_components) self.skip_processor_without_typed_kwargs(processor) image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } inputs = processor(images=image_input, **all_kwargs) self.assertEqual(inputs[self.text_input_name].shape[-1], 76) # Can process only text or images at a time def test_model_input_names(self): processor = self.get_processor() image_input = self.prepare_image_inputs() inputs = processor(images=image_input) self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
transformers/tests/models/colpali/test_processing_colpali.py/0
{ "file_path": "transformers/tests/models/colpali/test_processing_colpali.py", "repo_id": "transformers", "token_count": 4836 }
562
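The kwargs tests above exercise a three-way contract: defaults saved with each component, flat call-site kwargs, and nested `{"common_kwargs", "text_kwargs", "images_kwargs"}` dicts, with doubly passed keys rejected. A toy sketch of that merging policy, written from the tests' observable behavior rather than the library's actual implementation:

```python
def merge_processor_kwargs(flat_kwargs: dict, nested_kwargs: dict) -> dict:
    """Toy merge: nested per-modality kwargs first, then flat call-site kwargs."""
    merged = dict(nested_kwargs.get("common_kwargs", {}))
    for scope in ("text_kwargs", "images_kwargs"):
        merged.update(nested_kwargs.get(scope, {}))
    for key, value in flat_kwargs.items():
        if key in merged:
            # Mirrors test_doubly_passed_kwargs: the same key arriving via both
            # routes raises instead of being silently resolved.
            raise ValueError(f"kwarg {key!r} was passed both flat and nested")
        merged[key] = value
    return merged

merge_processor_kwargs({"max_length": 76}, {"common_kwargs": {"return_tensors": "pt"}})
```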
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the dac feature extractor.""" import itertools import random import unittest import numpy as np from transformers import DacFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch # Copied from transformers.tests.encodec.test_feature_extraction_dac.EncodecFeatureExtractionTester with Encodec->Dac class DacFeatureExtractionTester: # Ignore copy def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, hop_length=512, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.hop_length = hop_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate # Ignore copy def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "hop_length": self.hop_length, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: audio_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size audio_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: audio_inputs = [np.asarray(x) for x in audio_inputs] return audio_inputs @require_torch # Copied from transformers.tests.encodec.test_feature_extraction_dac.EnCodecFeatureExtractionTest with Encodec->Dac class DacFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = DacFeatureExtractor def setUp(self): self.feat_extract_tester = DacFeatureExtractionTester(self) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs] # Test not batched input encoded_sequences_1 = feat_extract(audio_inputs[0], 
return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_audio_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(audio_inputs, padding=True, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_audio_inputs, padding=True, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_double_precision_pad(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_audio_inputs = np.random.rand(100).astype(np.float64) py_audio_inputs = np_audio_inputs.tolist() for inputs in [py_audio_inputs, np_audio_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech audio_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in audio_samples] def test_integration(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [ 2.3803711e-03, 2.0751953e-03, 1.9836426e-03, 2.1057129e-03, 1.6174316e-03, 3.0517578e-04, 9.1552734e-05, 3.3569336e-04, 9.7656250e-04, 1.8310547e-03, 2.0141602e-03, 2.1057129e-03, 1.7395020e-03, 4.5776367e-04, -3.9672852e-04, 4.5776367e-04, 1.0070801e-03, 9.1552734e-05, 4.8828125e-04, 1.1596680e-03, 7.3242188e-04, 9.4604492e-04, 1.8005371e-03, 1.8310547e-03, 8.8500977e-04, 4.2724609e-04, 4.8828125e-04, 7.3242188e-04, 1.0986328e-03, 2.1057129e-03] ) # fmt: on input_audio = self._load_datasamples(1) feature_extractor = DacFeatureExtractor() input_values = feature_extractor(input_audio, return_tensors="pt")["input_values"] self.assertEqual(input_values.shape, (1, 1, 93696)) torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) audio_input_end = torch.tensor(input_audio[0][-30:], dtype=torch.float32) torch.testing.assert_close(input_values[0, 0, -46:-16], audio_input_end, rtol=1e-4, atol=1e-4) # Ignore copy @unittest.skip("The DAC model doesn't support stereo logic") def test_integration_stereo(self): pass # Ignore copy def test_truncation_and_padding(self): input_audio = self._load_datasamples(2) # would be easier if the stride was like feature_extractor = DacFeatureExtractor() # pad and trunc raise an error ? with self.assertRaisesRegex( ValueError, "^Both padding and truncation were set. 
Make sure you only set one.$", ): truncated_outputs = feature_extractor( input_audio, padding="max_length", truncation=True, return_tensors="pt" ).input_values # force truncate to max_length truncated_outputs = feature_extractor( input_audio, truncation=True, max_length=48000, return_tensors="pt" ).input_values self.assertEqual(truncated_outputs.shape, (2, 1, 48128)) # pad: padded_outputs = feature_extractor(input_audio, padding=True, return_tensors="pt").input_values self.assertEqual(padded_outputs.shape, (2, 1, 93696)) # force pad to max length truncated_outputs = feature_extractor( input_audio, padding="max_length", max_length=100000, return_tensors="pt" ).input_values self.assertEqual(truncated_outputs.shape, (2, 1, 100352)) # force no pad with self.assertRaisesRegex( ValueError, "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$", ): truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
transformers/tests/models/dac/test_feature_extraction_dac.py/0
{ "file_path": "transformers/tests/models/dac/test_feature_extraction_dac.py", "repo_id": "transformers", "token_count": 3857 }
563
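The shape assertions in `test_truncation_and_padding` above are all exact multiples of the extractor's `hop_length` of 512, which suggests DAC pads (or truncates) each clip up to the next hop boundary. A small arithmetic sketch, where the raw clip length of 93,680 samples is inferred from the test's `-46:-16` slice (16 samples of right padding) rather than measured:

```python
import math

def padded_length(n_samples: int, hop_length: int = 512) -> int:
    """Round a clip length up to the next multiple of hop_length."""
    return math.ceil(n_samples / hop_length) * hop_length

assert padded_length(93_680) == 93_696    # default padding of the dummy clip
assert padded_length(48_000) == 48_128    # truncation=True, max_length=48000
assert padded_length(100_000) == 100_352  # padding="max_length", max_length=100000
```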
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DiffLlama model.""" import gc import tempfile import unittest import pytest from packaging import version from parameterized import parameterized from transformers import AutoTokenizer, DiffLlamaConfig, StaticCache, is_torch_available, set_seed from transformers.testing_utils import ( backend_empty_cache, cleanup, require_bitsandbytes, require_flash_attn, require_read_token, require_torch, require_torch_accelerator, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DiffLlamaForCausalLM, DiffLlamaForQuestionAnswering, DiffLlamaForSequenceClassification, DiffLlamaForTokenClassification, DiffLlamaModel, ) from transformers.models.diffllama.modeling_diffllama import ( DiffLlamaRotaryEmbedding, ) class DiffLlamaModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DiffLlamaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DiffLlamaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class DiffLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DiffLlamaModel, DiffLlamaForCausalLM, DiffLlamaForSequenceClassification, DiffLlamaForQuestionAnswering, DiffLlamaForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DiffLlamaModel, "text-classification": DiffLlamaForSequenceClassification, "text-generation": DiffLlamaForCausalLM, "zero-shot": DiffLlamaForSequenceClassification, "question-answering": DiffLlamaForQuestionAnswering, "token-classification": DiffLlamaForTokenClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = DiffLlamaForCausalLM if is_torch_available() else None def setUp(self): self.model_tester = DiffLlamaModelTester(self) self.config_tester = ConfigTester(self, config_class=DiffLlamaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_diffllama_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], 
self.model_tester.type_sequence_label_size) model = DiffLlamaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_diffllama_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = DiffLlamaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_diffllama_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = DiffLlamaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_diffllama_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = DiffLlamaForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @parameterized.expand([("linear",), ("dynamic",), ("yarn",)]) def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = DiffLlamaModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = DiffLlamaModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should 
match. if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) def test_model_rope_scaling(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn( 1, dtype=torch.float32, device=torch_device ) # used exclusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Sanity check original RoPE original_rope = DiffLlamaRotaryEmbedding(config=config).to(torch_device) original_cos_short, original_sin_short = original_rope(x, position_ids_short) original_cos_long, original_sin_long = original_rope(x, position_ids_long) torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" config.rope_scaling = {"type": "linear", "factor": scaling_factor} linear_scaling_rope = DiffLlamaRotaryEmbedding(config=config).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short) linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long) torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :]) torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = DiffLlamaRotaryEmbedding(config=config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) # Sanity check Yarn RoPE scaling # Scaling should be over the entire input config.rope_scaling = {"type": "yarn", "factor": scaling_factor} yarn_scaling_rope = DiffLlamaRotaryEmbedding(config=config).to(torch_device) yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short) yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long) torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :]) torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :]) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_short, original_cos_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_long, original_sin_long) def test_model_loading_old_rope_configs(self): def _reinitialize_config(base_config, new_kwargs): # Reinitialize the config with the new kwargs, forcing the config to go through its __init__ validation # steps. 
base_config_dict = base_config.to_dict() new_config = DiffLlamaConfig.from_dict(config_dict={**base_config_dict, **new_kwargs}) return new_config # from untouched config -> ✅ base_config, model_inputs = self.model_tester.prepare_config_and_inputs_for_common() original_model = DiffLlamaForCausalLM(base_config).to(torch_device) original_model(**model_inputs) # from a config with the expected rope configuration -> ✅ config = _reinitialize_config(base_config, {"rope_scaling": {"rope_type": "linear", "factor": 10.0}}) original_model = DiffLlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) # from a config with the old rope configuration ('type' instead of 'rope_type') -> ✅ we gracefully handle BC config = _reinitialize_config(base_config, {"rope_scaling": {"type": "linear", "factor": 10.0}}) original_model = DiffLlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) # from a config with both 'type' and 'rope_type' -> ✅ they can coexist (and both are present in the config) config = _reinitialize_config( base_config, {"rope_scaling": {"type": "linear", "rope_type": "linear", "factor": 10.0}} ) self.assertTrue(config.rope_scaling["type"] == "linear") self.assertTrue(config.rope_scaling["rope_type"] == "linear") original_model = DiffLlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) # from a config with parameters in a bad range ('factor' should be >= 1.0) -> ⚠️ throws a warning with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs: config = _reinitialize_config(base_config, {"rope_scaling": {"rope_type": "linear", "factor": -999.0}}) original_model = DiffLlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) self.assertEqual(len(logs.output), 1) self.assertIn("factor field", logs.output[0]) # from a config with unknown parameters ('foo' isn't a rope option) -> ⚠️ throws a warning with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs: config = _reinitialize_config( base_config, {"rope_scaling": {"rope_type": "linear", "factor": 10.0, "foo": "bar"}} ) original_model = DiffLlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) self.assertEqual(len(logs.output), 1) self.assertIn("Unrecognized keys", logs.output[0]) # from a config with specific rope type but missing one of its mandatory parameters -> ❌ throws exception with self.assertRaises(KeyError): config = _reinitialize_config(base_config, {"rope_scaling": {"rope_type": "linear"}}) # missing "factor" @require_flash_attn @require_torch_gpu @require_bitsandbytes @pytest.mark.flash_attn_test @require_read_token @slow def test_flash_attn_2_generate_padding_right(self): """ Overwriting the common test as the test is flaky on tiny models """ model = DiffLlamaForCausalLM.from_pretrained( "kajuma/DiffLlama-0.3B-handcut", load_in_4bit=True, device_map={"": 0}, ) tokenizer = AutoTokenizer.from_pretrained("kajuma/DiffLlama-0.3B-handcut") texts = ["hi", "Hello this is a very long sentence"] tokenizer.padding_side = "right" tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(texts, return_tensors="pt", padding=True).to(0) output_native = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_native = tokenizer.batch_decode(output_native) model = DiffLlamaForCausalLM.from_pretrained( "kajuma/DiffLlama-0.3B-handcut", load_in_4bit=True, device_map={"": 0}, attn_implementation="flash_attention_2", ) output_fa_2 = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_fa_2 = 
tokenizer.batch_decode(output_fa_2) self.assertListEqual(output_native, output_fa_2) @require_flash_attn @require_torch_gpu @slow @pytest.mark.flash_attn_test def test_use_flash_attention_2_true(self): """ NOTE: this is the only test testing that the legacy `use_flash_attention=2` argument still works as intended. """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) new_model = DiffLlamaForCausalLM.from_pretrained( tmp_dir, attn_implementation="flash_attention_2", dtype=torch.float16 ).to("cuda") self.assertTrue(new_model.config._attn_implementation == "flash_attention_2") has_flash = False for name, submodule in new_model.named_modules(): if "FlashAttention" in submodule.__class__.__name__: has_flash = True break if not has_flash: raise ValueError("The flash model should have flash attention layers") @slow def test_eager_matches_sdpa_generate(self): """ Overwriting the common test as the test is flaky on tiny models """ max_new_tokens = 30 tokenizer = AutoTokenizer.from_pretrained("kajuma/DiffLlama-0.3B-handcut") model_sdpa = DiffLlamaForCausalLM.from_pretrained( "kajuma/DiffLlama-0.3B-handcut", dtype=torch.float16, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = DiffLlamaForCausalLM.from_pretrained( "kajuma/DiffLlama-0.3B-handcut", dtype=torch.float16, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") texts = [ "hi here's a longer context, getting longer and", "Hello this is a very long sentence my friend, very long for real", "Today I am in Paris and", ] for padding_side in ["left", "right"]: tokenizer.padding_side = padding_side tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(texts, return_tensors="pt", padding=True).to(torch_device) res_eager = model_eager.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False) res_sdpa = model_sdpa.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False) with self.subTest(f"{padding_side}"): torch.testing.assert_close( res_eager, res_sdpa, msg=f"\n{tokenizer.batch_decode(res_eager)} \nvs\n{tokenizer.batch_decode(res_sdpa)}", ) @require_torch_accelerator class DiffLlamaIntegrationTest(unittest.TestCase): def tearDown(self): # See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed. cleanup(torch_device, gc_collect=False) @slow @require_torch_accelerator @require_read_token @pytest.mark.torch_compile_test def test_compile_static_cache(self): # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2 # work as intended. 
See https://github.com/pytorch/pytorch/issues/121943 if version.parse(torch.__version__) < version.parse("2.3.0"): self.skipTest(reason="This test requires torch >= 2.3 to run.") NUM_TOKENS_TO_GENERATE = 40 # Note on `EXPECTED_TEXT_COMPLETION`'s diff: the current value matches the original test if the original test # was changed to have a cache of 53 tokens (as opposed to 4096), on Ampere GPUs. EXPECTED_TEXT_COMPLETION = [ "Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativ", "My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, " "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p", ] prompts = [ "Simply put, the theory of relativity states that ", "My favorite all time favorite condiment is ketchup.", ] tokenizer = AutoTokenizer.from_pretrained( "kajuma/DiffLlama-0.3B-handcut", pad_token="</s>", padding_side="right" ) model = DiffLlamaForCausalLM.from_pretrained( "kajuma/DiffLlama-0.3B-handcut", device_map=torch_device, dtype=torch.float16 ) inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) # Dynamic Cache generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False) dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text) # Static Cache generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text) # Static Cache + compile model._cache = None # clear cache object, initialized when we pass `cache_implementation="static"` model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text) @slow @require_torch_accelerator class Mask4DTestHard(unittest.TestCase): def tearDown(self): gc.collect() backend_empty_cache(torch_device) def setUp(self): model_name = "kajuma/DiffLlama-0.3B-handcut" self.model_dtype = torch.float32 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = DiffLlamaForCausalLM.from_pretrained(model_name, dtype=self.model_dtype).to(torch_device) def get_test_data(self): template = "my favorite {}" items = ("pet is a", "artist plays a", "name is L") # same number of tokens in each item batch_separate = [template.format(x) for x in items] # 3 separate lines batch_shared_prefix = template.format(" ".join(items)) # 1 line with options concatenated input_ids = self.tokenizer(batch_separate, return_tensors="pt").input_ids.to(torch_device) input_ids_shared_prefix = self.tokenizer(batch_shared_prefix, return_tensors="pt").input_ids.to(torch_device) mask_shared_prefix = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], [1, 1, 
1, 0, 0, 0, 1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1], ] ] ], device=torch_device, ) position_ids = torch.arange(input_ids.shape[1]).tile(input_ids.shape[0], 1).to(torch_device) # building custom positions ids based on custom mask position_ids_shared_prefix = (mask_shared_prefix.sum(dim=-1) - 1).reshape(1, -1) # effectively: position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]]).to(device) # inverting the mask min_dtype = torch.finfo(self.model_dtype).min mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=self.model_dtype) * min_dtype return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix def test_stacked_causal_mask(self): ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # single forward run with 4D custom mask logits_shared_prefix = self.model.forward( input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix ).logits logits_shared_prefix_last = logits_shared_prefix[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], : ] # last three tokens decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)] self.assertEqual(decoded, decoded_shared_prefix) def test_partial_stacked_causal_mask(self): # Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # 2 forward runs with custom 4D masks part_a = 3 # split point input_1a = input_ids_shared_prefix[:, :part_a] position_ids_1a = position_ids_shared_prefix[:, :part_a] mask_1a = mask_shared_prefix[:, :, :part_a, :part_a] outs_1a = self.model.forward(input_1a, attention_mask=mask_1a, position_ids=position_ids_1a) past_key_values_a = outs_1a["past_key_values"] # Case 1: we pass a 4D attention mask regarding the current sequence length (i.e. 
[..., seq_len, full_len]) input_1b = input_ids_shared_prefix[:, part_a:] position_ids_1b = position_ids_shared_prefix[:, part_a:] mask_1b = mask_shared_prefix[:, :, part_a:, :] outs_1b = self.model.forward( input_1b, attention_mask=mask_1b, position_ids=position_ids_1b, past_key_values=past_key_values_a, ) decoded_1b = [ self.tokenizer.decode(t) for t in outs_1b.logits.argmax(-1)[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a ] ] self.assertEqual(decoded, decoded_1b) def test_stacked_causal_mask_static_cache(self): """same as above but with StaticCache""" ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # upgrade the model with StaticCache max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1] past_key_values = StaticCache(config=self.model.config, max_cache_len=max_cache_len) padded_attention_mask = torch.nn.functional.pad( input=mask_shared_prefix, pad=(0, max_cache_len - mask_shared_prefix.shape[-1]), mode="constant", value=torch.finfo(self.model_dtype).min, ) # single forward run with 4D custom mask logits_shared_prefix = self.model.forward( input_ids_shared_prefix, attention_mask=padded_attention_mask, position_ids=position_ids_shared_prefix, cache_position=torch.arange(input_ids_shared_prefix.shape[-1], device=torch_device), past_key_values=past_key_values, ).logits logits_shared_prefix_last = logits_shared_prefix[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], : ] # last three tokens decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)] self.assertEqual(decoded, decoded_shared_prefix) def test_partial_stacked_causal_mask_static_cache(self): # Same as the test above, but the input is passed in two groups. 
It tests that we can pass partial 4D attention masks # we pass a 4D attention mask shaped [..., seq_len, full_static_cache_len]) ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # upgrade the model with StaticCache max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1] past_key_values = StaticCache(config=self.model.config, max_cache_len=max_cache_len) # forward run for the first part of input part_a = 3 # split point input_1a = input_ids_shared_prefix[:, :part_a] position_ids_1a = position_ids_shared_prefix[:, :part_a] mask_1a = mask_shared_prefix[:, :, :part_a, :part_a] padded_mask_1a = torch.nn.functional.pad( input=mask_1a, pad=(0, max_cache_len - mask_1a.shape[-1]), mode="constant", value=torch.finfo(self.model_dtype).min, ) _ = self.model.forward( input_1a, attention_mask=padded_mask_1a, position_ids=position_ids_1a, cache_position=torch.arange(part_a, device=torch_device), past_key_values=past_key_values, ) # forward run for the second part of input input_1b = input_ids_shared_prefix[:, part_a:] position_ids_1b = position_ids_shared_prefix[:, part_a:] mask_1b = mask_shared_prefix[:, :, part_a:, :] padded_mask_1b = torch.nn.functional.pad( input=mask_1b, pad=(0, max_cache_len - mask_1b.shape[-1]), mode="constant", value=0 ) outs_1b = self.model.forward( input_1b, attention_mask=padded_mask_1b, position_ids=position_ids_1b, cache_position=torch.arange( part_a, input_ids_shared_prefix.shape[-1], device=torch_device, ), past_key_values=past_key_values, ) decoded_1b = [ self.tokenizer.decode(t) for t in outs_1b.logits.argmax(-1)[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a ] ] self.assertEqual(decoded, decoded_1b)
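# Editor's sketch (hedged; an illustrative helper, not part of the original test
# file): the hand-written 12x12 block mask in Mask4DTestHard.get_test_data above
# can be built programmatically. All sizes are assumptions chosen to mirror that
# mask: a 3-token shared prefix followed by three 3-token items, where each item
# attends causally to the prefix and to itself but never to its sibling items.
import torch


def build_shared_prefix_4d_mask(prefix_len=3, item_len=3, num_items=3, dtype=torch.float32):
    total_len = prefix_len + num_items * item_len
    causal = torch.tril(torch.ones(total_len, total_len, dtype=torch.bool))
    mask = torch.zeros(total_len, total_len, dtype=torch.bool)
    # every position sees the shared prefix (causally, inside the prefix itself)
    mask[:, :prefix_len] = causal[:, :prefix_len]
    # each item additionally sees itself, causally
    for i in range(num_items):
        start = prefix_len + i * item_len
        block = torch.tril(torch.ones(item_len, item_len, dtype=torch.bool))
        mask[start : start + item_len, start : start + item_len] = block
    # add batch/head dims -> [1, 1, seq, seq], then convert the 0/1 mask to the
    # additive form, matching the `min_dtype` inversion done in get_test_data
    additive = mask[None, None].to(dtype)
    return additive.eq(0.0).to(dtype) * torch.finfo(dtype).min

# With the default arguments this should reproduce the tensor literal above row
# for row (before the min_dtype inversion it is the same 0/1 pattern).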
transformers/tests/models/diffllama/test_modeling_diffllama.py
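# Editor's sketch (hedged; an illustrative toy, not library code): what the
# "linear" RoPE scaling exercised by test_model_rope_scaling in the DiffLlama
# tests above amounts to. With factor f, position p is rotated as if it were
# position p / f, so scaled position p * f reproduces the original angles at
# position p. The dim/base/factor values below are assumed toy values.
import torch

dim, base, factor = 8, 10000.0, 10.0
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))


def rope_angles(positions: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    # linear scaling simply divides positions before the outer product with inv_freq
    return torch.outer(positions.float() / scale, inv_freq)


original = rope_angles(torch.arange(16))
scaled = rope_angles(torch.arange(160), scale=factor)
# every factor-th scaled position matches an original position, mirroring the
# per-position assertions in the test
torch.testing.assert_close(scaled[:: int(factor)], original)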
{ "file_path": "transformers/tests/models/diffllama/test_modeling_diffllama.py", "repo_id": "transformers", "token_count": 17281 }
564
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class DiTIntegrationTest(unittest.TestCase): @slow def test_for_image_classification(self): image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip") model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip") model.to(torch_device) from datasets import load_dataset dataset = load_dataset("nielsr/rvlcdip-demo") image = dataset["train"][0]["image"].convert("RGB") inputs = image_processor(image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits expected_shape = torch.Size((1, 16)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, ) torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/dit/test_modeling_dit.py
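# Editor's usage sketch (hedged): the checkpoint exercised by the DiT
# integration test above can also be driven through the high-level pipeline
# API. The local image path is a hypothetical placeholder, not something
# referenced by the test.
from transformers import pipeline

classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
predictions = classifier("document_scan.png")  # hypothetical input image
print(predictions[:3])  # top document classes; RVL-CDIP has 16 labels in total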
{ "file_path": "transformers/tests/models/dit/test_modeling_dit.py", "repo_id": "transformers", "token_count": 743 }
565
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DPT model.""" import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 32, 24, 24], neck_hidden_sizes=[16, 16, 32, 32], is_hybrid=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.backbone_out_indices = backbone_out_indices self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.backbone_featmap_shape = backbone_featmap_shape self.scope = scope self.is_hybrid = is_hybrid self.neck_hidden_sizes = neck_hidden_sizes # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [16, 16, 32, 32], "num_groups": 2, } return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, fusion_hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone=None, backbone_featmap_shape=self.backbone_featmap_shape, neck_hidden_sizes=self.neck_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = DPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_batching_equivalence(self, atol=2e-5, rtol=2e-5): super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone 
backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): model_name = "Intel/dpt-hybrid-midas" model = DPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_raise_readout_type(self): # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.readout_type = "add" with self.assertRaises(ValueError): _ = DPTForDepthEstimation(config) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/dpt/test_modeling_dpt_hybrid.py
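# Editor's sketch (hedged; a common post-processing step, not test code):
# DPTModelIntegrationTest above only checks a raw slice of `predicted_depth`,
# whose shape is [batch, height, width] (here (1, 384, 384)). To visualize the
# depth map it is usually interpolated back to the input image resolution; a
# minimal sketch of that step:
import torch


def resize_depth(predicted_depth: torch.Tensor, height: int, width: int) -> torch.Tensor:
    # interpolate expects [batch, channels, height, width], so add a channel dim
    resized = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=(height, width),
        mode="bicubic",
        align_corners=False,
    )
    return resized.squeeze(1)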
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt_hybrid.py", "repo_id": "transformers", "token_count": 5837 }
566
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Evolla model.""" import unittest from parameterized import parameterized from transformers import BitsAndBytesConfig, EvollaConfig, is_torch_available from transformers.testing_utils import ( TestCasePlus, require_bitsandbytes, require_torch, slow, torch_device, ) from transformers.utils import ( cached_property, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EvollaForProteinText2Text, EvollaModel, EvollaProcessor class EvollaModelTester: def __init__( self, parent, batch_size=1, is_training=False, text_seq_length=20, text_vocab_size=100, protein_seq_length=10, protein_vocab_size=20, hidden_size=4, # llama hidden size intermediate_size=7, # llama intermediate size num_hidden_layers=1, # llama hidden layers num_attention_heads=2, # llama attention heads num_key_value_heads=2, # llama key value heads protein_hidden_size=8, # protein encoder hidden size protein_num_hidden_layers=1, # protein encoder hidden layers protein_num_attention_heads=4, # protein encoder attention heads protein_intermediate_size=11, # protein encoder intermediate size resampler_num_latents=7, # sequence compressor num latents resampler_ff_mult=1, # sequence compressor ff mult resampler_depth=2, # sequence compressor depth resampler_dim_head=4, # sequence compressor dim head resampler_heads=2, # sequence compressor heads aligner_num_add_layers=1, # sequence aligner num add layers aligner_ffn_mult=1, # sequence aligner ffn mult use_input_mask=True, ): self.parent = parent self.batch_size = batch_size self.protein_seq_length = protein_seq_length self.protein_vocab_size = protein_vocab_size self.text_seq_length = text_seq_length self.text_vocab_size = text_vocab_size self.seq_length = text_seq_length self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.protein_hidden_size = protein_hidden_size self.protein_num_hidden_layers = protein_num_hidden_layers self.protein_num_attention_heads = protein_num_attention_heads self.protein_intermediate_size = protein_intermediate_size self.resampler_num_latents = resampler_num_latents self.resampler_ff_mult = resampler_ff_mult self.resampler_depth = resampler_depth self.resampler_dim_head = resampler_dim_head self.resampler_heads = resampler_heads self.aligner_num_add_layers = aligner_num_add_layers self.aligner_ffn_mult = aligner_ffn_mult self.use_input_mask = use_input_mask self.is_training = is_training @property def is_encoder_decoder(self): return False def prepare_config_and_inputs(self, 
num_proteins=None): batch_size = num_proteins if num_proteins is not None else self.batch_size text_input_ids = ids_tensor([batch_size, self.text_seq_length], self.text_vocab_size) protein_input_ids = ids_tensor([batch_size, self.protein_seq_length], self.protein_vocab_size) if self.use_input_mask: text_input_mask = random_attention_mask([batch_size, self.text_seq_length]) protein_input_mask = random_attention_mask([batch_size, self.protein_seq_length]) config = self.get_config() return (config, text_input_ids, text_input_mask, protein_input_ids, protein_input_mask) def get_config(self): return EvollaConfig( protein_encoder_config={ "vocab_size": self.protein_vocab_size, "hidden_size": self.protein_hidden_size, "num_hidden_layers": self.protein_num_hidden_layers, "num_attention_heads": self.protein_num_attention_heads, "intermediate_size": self.protein_intermediate_size, }, vocab_size=self.text_vocab_size, hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, aligner_ffn_mult=self.aligner_ffn_mult, aligner_num_add_layers=self.aligner_num_add_layers, resampler_depth=self.resampler_depth, resampler_dim_head=self.resampler_dim_head, resampler_heads=self.resampler_heads, resampler_num_latents=self.resampler_num_latents, resampler_ff_mult=self.resampler_ff_mult, ) def create_and_check_model( self, config, input_ids, input_mask, protein_input_ids, protein_input_mask, batch_size=None, ): batch_size = batch_size if batch_size is not None else self.batch_size model = EvollaModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, protein_input_ids=protein_input_ids, protein_attention_mask=protein_input_mask, ) self.parent.assertEqual(result.last_hidden_state.shape, (batch_size, input_ids.shape[1], self.hidden_size)) def create_and_check_model_gen( self, config, input_ids, input_mask, protein_input_ids, protein_input_mask, ): model = EvollaForProteinText2Text(config) model.to(torch_device) model.eval() model.generate( input_ids, attention_mask=input_mask, protein_input_ids=protein_input_ids, protein_attention_mask=protein_input_mask, max_length=self.seq_length + 2, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, text_input_ids, text_input_mask, protein_input_ids, protein_input_mask) = config_and_inputs inputs_dict = { "input_ids": text_input_ids, "attention_mask": text_input_mask, "protein_input_ids": protein_input_ids, "protein_attention_mask": protein_input_mask, } return config, inputs_dict @require_torch class EvollaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (EvollaModel, EvollaForProteinText2Text) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": EvollaModel} if is_torch_available() else {} test_pruning = False test_headmasking = False test_torchscript = False test_resize_embeddings = False maxDiff = None def setUp(self): self.model_tester = EvollaModelTester(self) self.config_tester = ConfigTester(self, config_class=EvollaConfig, hidden_size=37) @property def is_encoder_decoder(self): return self.model_tester.is_encoder_decoder def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) # XXX: EvollaForProteinText2Text has no MODEL_FOR group 
yet, but it should be the same # as MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, so for now manually changing to do the right thing # as super won't do it if return_labels: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def test_model_outputs_equivalence(self): try: orig = self.all_model_classes # EvollaModel.forward doesn't have labels input arg - only EvollaForProteinText2Text does self.all_model_classes = (EvollaForProteinText2Text,) if is_torch_available() else () super().test_model_outputs_equivalence() finally: self.all_model_classes = orig def test_config(self): self.config_tester.run_common_tests() def test_model_single_protein(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=1) self.model_tester.create_and_check_model(*config_and_inputs, batch_size=1) def test_model_multiple_proteins(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=2) self.model_tester.create_and_check_model(*config_and_inputs, batch_size=2) def test_generate_single_protein(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=1) self.model_tester.create_and_check_model_gen(*config_and_inputs) def test_generate_multiple_proteins(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=2) self.model_tester.create_and_check_model_gen(*config_and_inputs) def test_saprot_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True protein_informations = { "input_ids": inputs_dict["protein_input_ids"], "attention_mask": inputs_dict["protein_attention_mask"], } for model_class in self.all_model_classes: if model_class is not EvollaModel: continue model = model_class(config) model.to(torch_device) model.eval() protein_encoder_outputs = model.protein_encoder.model(**protein_informations, return_dict=True) print(model_class, protein_encoder_outputs) def test_protein_encoder_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True protein_informations = { "input_ids": inputs_dict["protein_input_ids"], "attention_mask": inputs_dict["protein_attention_mask"], } for model_class in self.all_model_classes: if model_class is not EvollaModel: continue model = model_class(config) model.to(torch_device) model.eval() protein_encoder_outputs = model.protein_encoder(**protein_informations, return_dict=True) print(model_class, protein_encoder_outputs) def test_single_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) print(outputs) def test_initialization(self): # we skip the latents initialization test config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # skip latents if name.endswith("latents"): print(f"Skipping latents {name}") continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), 
[0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @unittest.skip("Evolla requires both text and protein inputs which is currently not done in this test.") def test_eager_matches_sdpa_inference(self): pass @unittest.skip("Evolla does not support eager attention implementation.") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip( "Evolla has a separate test runner for generation tests with complex inheritance, causing this check to fail." ) def test_generation_tester_mixin_inheritance(self): pass @unittest.skip("Evolla requires both text and protein inputs which is currently not done in this test.") def test_flex_attention_with_grads(self): pass @require_torch class EvollaModelIntegrationTest(TestCasePlus): def _prepare_for_inputs(self): aa_seq = "MLLEETLKSCPIVKRGKYHYFIHPISDGVPLVEPKLLREVATRIIKIGNFEGVNKIVTAEAMGIPLVTTLSLYTDIPYVIMRKREYKLPGEVPVFQSTGYSKGQLYLNGIEKGDKVIIIDDVISTGGTMIAIINALERAGAEIKDIICVIERGDGKKIVEEKTGYKIKTLVKIDVVDGEVVIL" foldseek = "dvvvvqqqpfawdddppdtdgcgclapvpdpddpvvlvvllvlcvvpadpvqaqeeeeeddscpsnvvsncvvpvhyydywylddppdppkdwqwf######gitidpdqaaaheyeyeeaeqdqlrvvlsvvvrcvvrnyhhrayeyaeyhycnqvvccvvpvghyhynwywdqdpsgidtd" question = "What is the function of this protein?" protein_information = { "aa_seq": aa_seq, "foldseek": foldseek, } messages = [ {"role": "system", "content": "You are an AI expert that can answer any questions about protein."}, {"role": "user", "content": question}, ] return protein_information, messages @cached_property def default_processor(self): return EvollaProcessor.from_pretrained("westlake-repl/Evolla-10B-hf") @require_bitsandbytes @slow def test_inference_natural_language_protein_reasoning(self): protein_information, messages = self._prepare_for_inputs() processor = self.default_processor inputs = processor( messages_list=[messages], proteins=[protein_information], return_tensors="pt", padding="longest" ).to(torch_device) # the CI gpu is small so using quantization to fit quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype="float16", ) model = EvollaForProteinText2Text.from_pretrained( "westlake-repl/Evolla-10B-hf", quantization_config=quantization_config, device_map=torch_device, ) generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertIn("This protein", generated_text[0]) self.assertIn("purine", generated_text[0])
transformers/tests/models/evolla/test_modeling_evolla.py
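# Editor's sketch (hedged): both the Evolla and DPT test suites above use the
# same zero-init idiom in test_initialization. With every initializer range
# forced to ~0 via _config_zero_init, each trainable weight should be exactly
# 0.0 (projection / embedding weights) or 1.0 (e.g. norm scales); rounding the
# mean to 9 decimal places absorbs float noise before the membership check.
import torch


def looks_properly_zero_initialized(param: torch.Tensor) -> bool:
    rounded_mean = (param.data.mean() * 1e9).round() / 1e9
    return rounded_mean.item() in [0.0, 1.0]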
{ "file_path": "transformers/tests/models/evolla/test_modeling_evolla.py", "repo_id": "transformers", "token_count": 7017 }
567
# Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import FunnelConfig, FunnelTokenizer, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) class FunnelModelTester: """You can also import this e.g, from .test_modeling_funnel import FunnelModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, # Set to a smaller value, so we can keep the small error threshold (1e-5) in the test num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std # Used in the tests to check the size of the first attention layer self.num_attention_heads = n_head # Used in the tests to check the size of the first hidden state self.hidden_size = self.d_model # Used in the tests to check the number of output hidden states/attentions self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) def get_config(self): return FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelBaseModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForPreTraining(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, 
self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_choices = self.num_choices model = FunnelForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FunnelModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( ( FunnelModel, FunnelForMaskedLM, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { 
"feature-extraction": (FunnelBaseModel, FunnelModel), "fill-mask": FunnelForMaskedLM, "question-answering": FunnelForQuestionAnswering, "text-classification": FunnelForSequenceClassification, "token-classification": FunnelForTokenClassification, "zero-shot": FunnelForSequenceClassification, } if is_torch_available() else {} ) # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch class FunnelBaseModelTest(ModelTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( (FunnelBaseModel, FunnelForMultipleChoice, FunnelForSequenceClassification) if is_torch_available() else () ) def setUp(self): self.model_tester = FunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) # overwrite from test_modeling_common def test_training(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ == "FunnelBaseModel": continue model = model_class(config) 
model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers class FunnelModelIntegrationTest(unittest.TestCase): def test_inference_tiny_model(self): batch_size = 13 sequence_length = 7 input_ids = torch.arange(0, batch_size * sequence_length).long().reshape(batch_size, sequence_length) lengths = [0, 1, 2, 3, 4, 5, 6, 4, 1, 3, 5, 0, 1] token_type_ids = torch.tensor([[2] + [0] * a + [1] * (sequence_length - a - 1) for a in lengths]) model = FunnelModel.from_pretrained("sgugger/funnel-random-tiny") output = model(input_ids, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2344.8352) expected_output_mean = torch.tensor(0.8052) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4) attention_mask = torch.tensor([[1] * 7, [1] * 4 + [0] * 3] * 6 + [[0, 1, 1, 0, 0, 1, 1]]) output = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2343.8425) expected_output_mean = torch.tensor(0.8049) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_model(self): tokenizer = FunnelTokenizer.from_pretrained("huggingface/funnel-small") model = FunnelModel.from_pretrained("huggingface/funnel-small") inputs = tokenizer("Hello! I am the Funnel Transformer model.", return_tensors="pt") output = model(**inputs)[0] expected_output_sum = torch.tensor(235.7246) expected_output_mean = torch.tensor(0.0256) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
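# --- Editor's illustrative sketch (not part of the original test file) ---
# Both Funnel test classes above override _mock_init_weights to fill every weight,
# bias, and relative-attention parameter with the constant 3, so that structural
# checks do not depend on random initialization. The same pattern on a toy module:

import torch
from torch import nn


def fill_weights(module: nn.Module, value: float = 3.0) -> None:
    # Deterministically overwrite weight/bias tensors where present.
    if getattr(module, "weight", None) is not None:
        module.weight.data.fill_(value)
    if getattr(module, "bias", None) is not None:
        module.bias.data.fill_(value)


layer = nn.Linear(4, 2)
layer.apply(fill_weights)  # nn.Module.apply visits the module and all submodules
assert torch.all(layer.weight == 3.0) and torch.all(layer.bias == 3.0)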
transformers/tests/models/funnel/test_modeling_funnel.py/0
{ "file_path": "transformers/tests/models/funnel/test_modeling_funnel.py", "repo_id": "transformers", "token_count": 9088 }
568
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import os
import random
import tempfile
import unittest
from collections.abc import Sequence
from typing import Optional

import numpy as np
from parameterized import parameterized

from transformers.models.gemma3n import Gemma3nAudioFeatureExtractor
from transformers.testing_utils import (
    check_json_file_has_correct_format,
    require_torch,
)
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    pass

global_rng = random.Random()

MAX_LENGTH_FOR_TESTING = 512


def floats_list(shape, scale=1.0, rng=None):
    """Creates a nested list of random float values (converted to float32 arrays by callers)"""
    if rng is None:
        rng = global_rng

    values = []
    for _ in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Gemma3nAudioFeatureExtractionTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size: int = 128,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        return_attention_mask: bool = False,
        # ignore hop_length / frame_length for now, as ms -> length conversion causes issues with serialization tests
        # frame_length_ms: float = 32.0,
        # hop_length: float = 10.0,
        min_frequency: float = 125.0,
        max_frequency: float = 7600.0,
        preemphasis: float = 0.97,
        preemphasis_htk_flavor: bool = True,
        fft_overdrive: bool = True,
        dither: float = 0.0,
        input_scale_factor: float = 1.0,
        mel_floor: float = 1e-5,
        per_bin_mean: Optional[Sequence[float]] = None,
        per_bin_stddev: Optional[Sequence[float]] = None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.return_attention_mask = return_attention_mask
        # ignore hop_length / frame_length for now, as ms -> length conversion causes issues with serialization tests
        # self.frame_length_ms = frame_length_ms
        # self.hop_length = hop_length
        self.min_frequency = min_frequency
        self.max_frequency = max_frequency
        self.preemphasis = preemphasis
        self.preemphasis_htk_flavor = preemphasis_htk_flavor
        self.fft_overdrive = fft_overdrive
        self.dither = dither
        self.input_scale_factor = input_scale_factor
        self.mel_floor = mel_floor
        self.per_bin_mean = per_bin_mean
        self.per_bin_stddev = per_bin_stddev

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "sampling_rate": self.sampling_rate,
            "padding_value": self.padding_value,
            "return_attention_mask": self.return_attention_mask,
            "min_frequency": self.min_frequency,
            "max_frequency": self.max_frequency,
            "preemphasis": self.preemphasis,
            "preemphasis_htk_flavor": self.preemphasis_htk_flavor,
            "fft_overdrive": self.fft_overdrive,
            "dither": self.dither,
            "input_scale_factor":
self.input_scale_factor, "mel_floor": self.mel_floor, "per_bin_mean": self.per_bin_mean, "per_bin_stddev": self.per_bin_stddev, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class Gemma3nAudioFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Gemma3nAudioFeatureExtractor def setUp(self): self.feat_extract_tester = Gemma3nAudioFeatureExtractionTester(self) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_from_pretrained_kwargs(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained( tmpdirname, feature_size=2 * self.feat_extract_dict["feature_size"] ) mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(2 * mel_1.shape[1] == mel_2.shape[1]) @parameterized.expand( [ ([floats_list((1, x))[0] for x in range(800, 1400, 200)],), ([floats_list((1, x))[0] for x in (800, 800, 800)],), ([floats_list((1, x))[0] for x in range(200, (MAX_LENGTH_FOR_TESTING + 500), 200)], True), ] ) def test_call(self, audio_inputs, test_truncation=False): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs] input_features = feature_extractor(np_audio_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) # input_features.shape should be (batch, num_frames, n_mels) ~= (batch, num_frames, feature_size) # 480_000 is the max_length that inputs are padded to. 
We use that to calculate num_frames.
        expected_num_frames = (480_000 - feature_extractor.frame_length) // (feature_extractor.hop_length) + 1
        self.assertTrue(
            input_features.shape[-2] == expected_num_frames,
            f"num_frames mismatch: {input_features.shape[-2]} vs {expected_num_frames}",
        )
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        encoded_sequences_1 = feature_extractor(audio_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_audio_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        if test_truncation:
            audio_inputs_truncated = [x[:MAX_LENGTH_FOR_TESTING] for x in audio_inputs]
            np_audio_inputs_truncated = [np.asarray(audio_input) for audio_input in audio_inputs_truncated]

            encoded_sequences_1 = feature_extractor(
                audio_inputs_truncated, max_length=MAX_LENGTH_FOR_TESTING, return_tensors="np"
            ).input_features
            encoded_sequences_2 = feature_extractor(
                np_audio_inputs_truncated, max_length=MAX_LENGTH_FOR_TESTING, return_tensors="np"
            ).input_features
            for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
                self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_audio_features_attn_mask_consistent(self):
        # regression test for https://github.com/huggingface/transformers/issues/39911
        # Test that input_features and input_features_mask have consistent shapes
        np.random.seed(42)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        for i in [512, 640, 1024]:
            audio = np.random.randn(i)
            mm_data = {
                "raw_speech": [audio],
                "sampling_rate": 16000,
            }
            inputs = feature_extractor(**mm_data, return_tensors="np")
            out = inputs["input_features"]
            mask = inputs["input_features_mask"]
            assert out.ndim == 3
            assert mask.ndim == 2
            assert out.shape[:2] == mask.shape[:2]

    def test_dither(self):
        np.random.seed(42)  # seed the dithering randn()

        # Tests that features with and without a little dithering are similar, but not identical
        dict_no_dither = self.feat_extract_tester.prepare_feat_extract_dict()
        dict_no_dither["dither"] = 0.0

        dict_dither = self.feat_extract_tester.prepare_feat_extract_dict()
        dict_dither["dither"] = 0.00003  # approx.
1/32k feature_extractor_no_dither = self.feature_extraction_class(**dict_no_dither) feature_extractor_dither = self.feature_extraction_class(**dict_dither) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # compute features input_features_no_dither = feature_extractor_no_dither( np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_no_dither["sampling_rate"] ).input_features input_features_dither = feature_extractor_dither( np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_dither["sampling_rate"] ).input_features # test there is a difference between features (there's added noise to input signal) diff = input_features_dither - input_features_no_dither # features are not identical self.assertTrue(np.abs(diff).mean() > 1e-6) # features are not too different self.assertTrue(np.abs(diff).mean() <= 1e-4) self.assertTrue(np.abs(diff).max() <= 5e-3) @require_torch def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32)
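# --- Editor's illustrative sketch (not part of the original test file) ---
# The padded-length check in test_call above relies on the standard framing formula
# num_frames = (num_samples - frame_length) // hop_length + 1. A worked example with
# hypothetical window sizes (the real frame_length/hop_length come from the feature
# extractor's ms -> sample conversion):


def expected_num_frames(num_samples: int, frame_length: int, hop_length: int) -> int:
    # One frame per hop, starting once the first full analysis window fits.
    return (num_samples - frame_length) // hop_length + 1


# e.g. 480_000 samples (30 s at 16 kHz) with a 32 ms window (512 samples)
# and a 10 ms hop (160 samples):
assert expected_num_frames(480_000, 512, 160) == 2997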
transformers/tests/models/gemma3n/test_feature_extraction_gemma3n.py/0
{ "file_path": "transformers/tests/models/gemma3n/test_feature_extraction_gemma3n.py", "repo_id": "transformers", "token_count": 5571 }
569
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch InstructBLIP model.""" import inspect import tempfile import unittest import numpy as np import pytest import requests from transformers import ( CONFIG_MAPPING, InstructBlipConfig, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from transformers.testing_utils import ( Expectations, cleanup, require_accelerate, require_bitsandbytes, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from torch import nn from transformers import InstructBlipForConditionalGeneration, InstructBlipModel, InstructBlipVisionModel if is_vision_available(): from PIL import Image class InstructBlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in case of a vision transformer, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return InstructBlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = InstructBlipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = 
(self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class InstructBlipVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as InstructBLIP's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (InstructBlipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = InstructBlipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=InstructBlipConfig, has_text_modality=False, common_properties=["num_query_tokens", "image_token_index"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="InstructBLIP's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training(self): pass @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "Salesforce/instructblip-flan-t5-xl" model = InstructBlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class InstructBlipQFormerModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, 
attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask def get_config(self): return InstructBlipQFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) # this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py class InstructBlipTextModelDecoderOnlyTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_ids[:, -1] = 
self.eos_token_id # Eos Token attention_mask = input_ids.ne(self.pad_token_id) return config, input_ids, attention_mask def get_config(self): return CONFIG_MAPPING["opt"]( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) # this model tester uses a decoder-only language model (OPT) class InstructBlipForConditionalGenerationDecoderOnlyModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10, image_token_index=4, ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = InstructBlipVisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = InstructBlipQFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = InstructBlipTextModelDecoderOnlyTester(parent, **text_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.seq_length = self.text_model_tester.seq_length + num_query_tokens # need seq_length for common tests self.is_training = is_training self.num_query_tokens = num_query_tokens self.image_token_index = image_token_index def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() _, _, _, qformer_input_ids, qformer_attention_mask = self.qformer_model_tester.prepare_config_and_inputs() _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() vision_tokens = ( torch.ones((input_ids.shape[0], self.num_query_tokens), device=torch_device, dtype=input_ids.dtype) * self.image_token_index ) input_ids[input_ids == self.image_token_index] = self.text_model_tester.pad_token_id input_ids = torch.cat([vision_tokens, input_ids], dim=-1) vision_attention_mask = torch.ones_like(vision_tokens) attention_mask = torch.cat([vision_attention_mask, attention_mask], dim=-1) return config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values def get_config(self): return InstructBlipConfig.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, image_token_index=self.image_token_index, ) def create_and_check_for_conditional_generation( self, config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values ): model = InstructBlipForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values, input_ids=input_ids, attention_mask=attention_mask, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, ) expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length self.parent.assertEqual( result.logits.shape, (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs 
= self.prepare_config_and_inputs() config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "qformer_input_ids": qformer_input_ids, "qformer_attention_mask": qformer_attention_mask, } return config, inputs_dict @require_torch class InstructBlipForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ( InstructBlipModel, InstructBlipForConditionalGeneration, ) if is_torch_available() else () ) pipeline_model_mapping = {"image-text-to-text": InstructBlipForConditionalGeneration} additional_model_inputs = ["qformer_input_ids", "input_ids"] fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False _is_composite = True def setUp(self): self.model_tester = InstructBlipForConditionalGenerationDecoderOnlyModelTester(self) self.config_tester = ConfigTester( self, config_class=InstructBlipConfig, has_text_modality=False, common_properties=["num_query_tokens", "image_token_index"], ) def test_config(self): self.config_tester.run_common_tests() def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip( reason=" InstructBlipQFormerModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet." ) def test_eager_matches_sdpa_generate(self): pass @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="InstructBlipForConditionalGeneration doesn't support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Tied weights are tested in individual model tests") def test_tied_weights_keys(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="InstructBlipModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save InstructBlipConfig and check if we can load InstructBlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = InstructBlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save InstructBlipConfig and check if we can load InstructBlipQFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = InstructBlipQFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = 
"Salesforce/instructblip-flan-t5-xl" model = InstructBlipForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite because InstructBLIP internally calls LM.generate() with embeds thus it cannot operate in no cache format def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1): use_cache = True # force this to be True in case False is passed super()._check_generate_outputs( output, config, use_cache=use_cache, num_return_sequences=num_return_sequences, num_beams=num_beams ) # overwrite because InstructBLIP cannot generate only from input ids, and requires `pixel` values and `qformer_input_ids` in all cases to be present @pytest.mark.generate def test_left_padding_compatibility(self): # NOTE: left-padding results in small numerical differences. This is expected. # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 # First, filter out models that don't support left padding # - The model must have generative capabilities if len(self.all_generative_model_classes) == 0: self.skipTest(reason="No generative architecture available for this model.") # - The model must support padding if not self.has_attentions: self.skipTest(reason="This model doesn't support padding.") # - The model must be a decoder-only architecture (encoder-based architectures use right-padding) decoder_only_classes = [] for model_class in self.all_generative_model_classes: config, _ = self.prepare_config_and_inputs_for_generate() if config.is_encoder_decoder: continue else: decoder_only_classes.append(model_class) if len(decoder_only_classes) == 0: self.skipTest(reason="No decoder-only architecture available for this model.") # - Decoder-only architectures derived from encoder-decoder models could support it in theory, but we haven't # added support for it yet. We skip these models for now. has_encoder_attributes = any( attr_name for attr_name in config.to_dict() if attr_name.startswith("encoder") and attr_name != "encoder_no_repeat_ngram_size" ) if has_encoder_attributes: self.skipTest( reason="The decoder-only derived from encoder-decoder models are not expected to support left-padding." 
)

        # Then, test left-padding
        def _prepare_model_kwargs(input_ids, attention_mask, signature):
            model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
            if "position_ids" in signature:
                position_ids = torch.cumsum(attention_mask, dim=-1) - 1
                position_ids.masked_fill_(attention_mask == 0, 1)
                model_kwargs["position_ids"] = position_ids
            if "cache_position" in signature:
                cache_position = torch.arange(input_ids.shape[-1], device=torch_device)
                model_kwargs["cache_position"] = cache_position
            return model_kwargs

        for model_class in decoder_only_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            input_ids = inputs_dict["input_ids"]
            attention_mask = inputs_dict.get("attention_mask")
            pixel_values = inputs_dict["pixel_values"]
            qformer_input_ids = inputs_dict["qformer_input_ids"]
            if attention_mask is None:
                attention_mask = torch.ones_like(input_ids)

            model = model_class(config).to(torch_device).eval()
            signature = inspect.signature(model.forward).parameters.keys()

            # no cache as some models require special cache classes to be initialized outside forward
            model.generation_config.use_cache = False

            # Without padding
            model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature)
            next_logits_wo_padding = model(
                **model_kwargs, pixel_values=pixel_values, qformer_input_ids=qformer_input_ids
            ).logits[:, -1, :]

            # With left-padding (length 32)
            # can hardcode pad_token to be 0 as we'll do attn masking anyway
            pad_token_id = (
                config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 0
            )
            pad_size = (input_ids.shape[0], 32)
            padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id
            padded_input_ids = torch.cat((padding, input_ids), dim=1)
            padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1)
            model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature)
            next_logits_with_padding = model(
                **model_kwargs, pixel_values=pixel_values, qformer_input_ids=qformer_input_ids
            ).logits[:, -1, :]

            # They should result in very similar logits
            torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)

    def test_sdpa_can_dispatch_composite_models(self):
        """
        Tests that composite models dispatch to the requested attention implementation (SDPA or eager) when loading
        the model. The check works by inspecting layer names, as SDPA layers are usually named "SDPAAttention".
        In contrast to the above test, this one checks that "config._attn_implementation" is a dict after the model
        is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
        See https://github.com/huggingface/transformers/pull/32238 for more info.

        The test tries to cover the most general cases of composite models: VLMs with vision and text configs. Any
        model that has a different set of sub-configs has to override this test.
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(model.language_model.config._attn_implementation == "sdpa") self.assertTrue(model.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model.qformer.config._attn_implementation == "eager") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.language_model.config._attn_implementation == "eager") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.qformer.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError("The eager model should not have SDPA attention layers") # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch @slow class InstructBlipModelIntegrationTest(unittest.TestCase): def tearDown(self): cleanup(torch_device, gc_collect=False) @require_bitsandbytes @require_accelerate def test_inference_vicuna_7b(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-vicuna-7b", load_in_8bit=True ) url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") prompt = "What is unusual about this image?" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, torch.float16) # verify generation outputs = model.generate(**inputs, max_new_tokens=30) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() expected_outputs = Expectations( { ("xpu", 3): [32001] * 32 + [2, 1724, 338, 22910, 1048, 445, 1967, 29973, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 1623, 263, 19587, 4272, 11952, 29889], ("cuda", None): [32001] * 32 + [2, 1724, 338, 22910, 1048, 445, 1967, 29973, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 373, 263, 19587, 4272, 11952, 29889], } ) # fmt: off expected_output = expected_outputs.get_expectation() expected_texts = Expectations( { ("xpu", 3): "What is unusual about this image? 
The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV while driving down a busy city street.", ("cuda", None): "What is unusual about this image? The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV while driving on a busy city street.", } ) # fmt: off expected_text = expected_texts.get_expectation() self.assertEqual(outputs[0].tolist(), expected_output) self.assertEqual(generated_text, expected_text) def test_inference_flant5_xl(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-flan-t5-xl", dtype=torch.bfloat16, ).to(torch_device) url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") prompt = "What is unusual about this image?" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device) for k, v in inputs.items(): if torch.is_floating_point(v): inputs[k] = v.to(torch.bfloat16) outputs = model.generate( **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, repetition_penalty=1.5, length_penalty=1.0, temperature=1, ) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0] expected_outputs = [0, 37, 1023, 9850, 7, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4459, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 5119, 3, 9, 4459, 8677, 28, 3, 9, 4459, 6177, 6, 11, 3, 88, 19, 338, 46, 3575, 53, 1476, 5223, 12, 8, 223, 13, 8, 4049, 5, 37, 1023, 19, 7225, 16, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 338, 46, 3575, 53, 1476, 5223, 12, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 338, 46, 3575, 53, 1476, 5223, 12, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 1023, 19, 7225, 16, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 338, 46, 3575, 53, 1476, 5223, 12, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 1] # fmt: skip self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, "The image depicts a man ironing clothes on the back of a yellow van in the middle of a busy city street. The man is wearing a yellow shirt with a yellow tie, and he is using an ironing board attached to the back of the van. The image is unusual in that it shows a man ironing clothes on the back of a van in the middle of a busy city street. The man is using an ironing board attached to the back of a van in the middle of a busy city street. The man is using an ironing board attached to the back of a van in the middle of a busy city street. The image is unusual in that it shows a man ironing clothes on the back of a van in the middle of a busy city street. The man is using an ironing board attached to the back of a van in the middle of a busy city street.", ) def test_inference_interpolate_pos_encoding(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-flan-t5-xl", dtype=torch.bfloat16, ).to(torch_device) processor.image_processor.size = {"height": 500, "width": 500} image = prepare_img() prompt = "What's in the image?" 
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs, interpolate_pos_encoding=True) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual( predictions[0].tolist(), [0, 37, 1023, 753, 3, 9, 2335, 3823, 30, 8, 2608, 28, 3, 9, 1782, 5, 1] ) self.assertEqual(generated_text, "The image features a woman sitting on the beach with a dog.")
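# --- Editor's illustrative sketch (not part of the original test file) ---
# test_left_padding_compatibility above rebuilds position_ids from the attention mask
# so that left-padded and unpadded inputs see identical token positions. The core
# trick in isolation (toy tensors only, no model needed):

import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # two left-pad tokens, three real ones
position_ids = torch.cumsum(attention_mask, dim=-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)  # pad slots get a harmless dummy value
# Real tokens receive positions 0, 1, 2 regardless of how much left padding precedes them.
assert position_ids.tolist() == [[1, 1, 0, 1, 2]]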
transformers/tests/models/instructblip/test_modeling_instructblip.py/0
{ "file_path": "transformers/tests/models/instructblip/test_modeling_instructblip.py", "repo_id": "transformers", "token_count": 15529 }
570
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LayoutLMv2 model.""" import unittest from transformers.testing_utils import ( require_detectron2, require_non_xpu, require_torch, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_detectron2_available, is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( LayoutLMv2Config, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2Model, ) if is_detectron2_available(): from detectron2.structures.image_list import ImageList class LayoutLMv2ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, image_feature_pool_shape=[7, 7, 256], coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.image_feature_pool_shape = image_feature_pool_shape self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t 
        image = ImageList(
            torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device),
            self.image_size,
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = LayoutLMv2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            image_feature_pool_shape=self.image_feature_pool_shape,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
        )

        # use a smaller resnet backbone to make tests faster
        config.detectron2_config_args["MODEL.RESNETS.DEPTH"] = 18
        config.detectron2_config_args["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 64
        config.detectron2_config_args["MODEL.RESNETS.NUM_GROUPS"] = 1

        return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv2Model(config=config)
        model.to(torch_device)
        model.eval()
        # exercise several valid input combinations
        result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, image=image)

        # LayoutLMv2 has a longer expected sequence length because visual tokens are appended to the text tokens
        expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1]

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            image=image,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            image=image,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
model = LayoutLMv2ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "image": image, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_non_xpu @require_torch @require_detectron2 class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = True test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv2Model, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv2ForQuestionAnswering, "feature-extraction": LayoutLMv2Model} if is_torch_available() else {} ) def setUp(self): self.model_tester = LayoutLMv2ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @require_torch_multi_gpu @unittest.skip( reason=( "LayoutLMV2 and its dependency `detectron2` have some layers using `add_module` which doesn't work well" " with `nn.DataParallel`" ) ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # LayoutLMv2 has a different expected sequence length expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = 
outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # LayoutLMv2 has a different expected sequence length expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) self.assertListEqual( list(hidden_states[0].shape[-2:]), [expected_seq_len, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="We cannot configure detectron2 to output a smaller backbone") def test_model_is_small(self): pass @slow def test_model_from_pretrained(self): model_name = "microsoft/layoutlmv2-base-uncased" model = LayoutLMv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "backbone" in name or "visual_segment_embedding" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_batching_equivalence(self): def equivalence(tensor1, tensor2): return 1.0 - F.cosine_similarity(tensor1.float().flatten(), 
tensor2.float().flatten(), dim=0, eps=0)

        def recursive_check(batched_object, single_row_object, model_name, key):
            if isinstance(batched_object, (list, tuple)):
                for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            elif batched_object is None:
                return
            else:
                batched_row = batched_object[:1]
                self.assertFalse(
                    torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
                )
                self.assertTrue(
                    (equivalence(batched_row, single_row_object)) <= 1e-03,
                    msg=(
                        f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
                        f"Difference={equivalence(batched_row, single_row_object)}."
                    ),
                )

        config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config.output_hidden_states = True

            model_name = model_class.__name__
            batched_input_prepared = self._prepare_for_class(batched_input, model_class)
            model = model_class(config).to(torch_device).eval()

            batch_size = self.model_tester.batch_size
            single_row_input = {}
            for key, value in batched_input_prepared.items():
                if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
                    single_batch_shape = value.shape[0] // batch_size
                    single_row_input[key] = value[:single_batch_shape]
                elif hasattr(value, "tensor"):
                    # LayoutLMv2 uses an ImageList instead of pixel values (needed for torchscript).
                    # Slice its underlying tensor directly instead of reusing `single_batch_shape`,
                    # which would be undefined if no plain tensor was seen in an earlier iteration.
                    single_row_input[key] = value.tensor[: value.tensor.shape[0] // batch_size]

            with torch.no_grad():
                model_batched_output = model(**batched_input_prepared)
                model_row_output = model(**single_row_input)

            for key in model_batched_output:
                recursive_check(model_batched_output[key], model_row_output[key], model_name, key)


def prepare_layoutlmv2_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on:
    # fmt: off
    input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)])  # noqa: E231
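    # detectron2's ImageList pairs the padded batch tensor with each image's true
    # (height, width) so downstream ops can tell real content apart from padding.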
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231 token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231 # fmt: on return input_ids, bbox, image, attention_mask, token_type_ids @require_torch @require_detectron2 class LayoutLMv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device) ( input_ids, bbox, image, attention_mask, token_type_ids, ) = prepare_layoutlmv2_batch_inputs() # forward pass outputs = model( input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), image=image.to(torch_device), attention_mask=attention_mask.to(torch_device), token_type_ids=token_type_ids.to(torch_device), ) # verify the sequence output expected_shape = torch.Size( ( 2, input_ids.shape[1] + model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1], model.config.hidden_size, ) ) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device ) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3) # verify the pooled output expected_shape = torch.Size((2, model.config.hidden_size)) self.assertEqual(outputs.pooler_output.shape, expected_shape)
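
# For reference (not exercised by the tests above), a minimal sketch of how the `input_ids`,
# `bbox` and `image` inputs are usually produced for a real document -- assuming a local
# "document.png" (hypothetical path) and `LayoutLMv2Processor`, which runs OCR via
# pytesseract by default:
#
#     from PIL import Image
#     from transformers import LayoutLMv2Processor
#
#     processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
#     document = Image.open("document.png").convert("RGB")
#     encoding = processor(document, return_tensors="pt")
#     # encoding now holds input_ids, token_type_ids, attention_mask, bbox and image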
transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py/0
{ "file_path": "transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py", "repo_id": "transformers", "token_count": 11455 }
571
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch LeViT model."""

import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.auto.modeling_auto import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
        MODEL_MAPPING_NAMES,
    )


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[16, 32, 48],
        num_attention_heads=[1, 2, 3],
        depths=[2, 3, 4],
        key_dim=[8, 8, 8],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) def create_and_check_model(self, config, pixel_values, labels): model = LevitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for _ in range(4): height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = LevitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Levit does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = LevitModelTester(self) self.config_tester = ConfigTester( self, config_class=LevitConfig, has_text_modality=False, common_properties=["image_size", "num_channels"] ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Levit does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Levit does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Levit does not output attentions") def test_attention_outputs(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depths) + 1 self.assertEqual(len(hidden_states), expected_num_layers) image_size = (self.model_tester.image_size, self.model_tester.image_size) height, width = image_size[0], image_size[1] for _ in range(4): height = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) width = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]), [ height * width, self.model_tester.hidden_sizes[0], ], ) config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # special case for LevitForImageClassificationWithTeacher model def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class.__name__ in MODEL_MAPPING_NAMES.values() or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class.__name__ not in [ *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: 
inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def test_model_from_pretrained(self): model_name = "facebook/levit-128S" model = LevitModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class LevitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LevitImageProcessor.from_pretrained("facebook/levit-128S") @slow def test_inference_image_classification_head(self): model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expectations = Expectations( { (None, None): [1.0448, -0.3745, -1.8317], ("cuda", 8): [1.0448, -0.3745, -1.8317], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
transformers/tests/models/levit/test_modeling_levit.py/0
{ "file_path": "transformers/tests/models/levit/test_modeling_levit.py", "repo_id": "transformers", "token_count": 7269 }
572